Example #1
    def __init__(self, mission, vehicle, filepath):
        """Prepare output files for writing combined RAW navigation data.

        Writes a mission.cfg with magnetic-variation settings into the
        processed folder, ensures the dRAWLOGS_cv directory exists, and
        opens combined.RAW.auv for writing, keeping the handle on
        ``self.f`` for later use.
        """
        self.mission = mission
        self.vehicle = vehicle
        self.filepath = filepath
        self.rdi_altitude = None
        self.rdi_orientation = None
        self.data = ""

        processed = get_processed_folder(self.filepath)
        config_filename = processed / "mission.cfg"

        raw_logs_dir = processed / "dRAWLOGS_cv"
        raw_logs_dir.mkdir(parents=True, exist_ok=True)

        self.nav_file = raw_logs_dir / "combined.RAW.auv"

        origin = self.mission.origin
        config_text = (
            f"MAG_VAR_LAT {float(origin.latitude)}"
            f"\nMAG_VAR_LNG {float(origin.longitude)}"
            f'\nMAG_VAR_DATE "{origin.date}"'
            f"\nMAGNETIC_VAR_DEG {float(0)}"
        )
        with config_filename.open("w") as cfg:
            cfg.write(config_text)
        # The nav file handle is deliberately kept open; writers use self.f.
        self.f = self.nav_file.open("w")
def call_rescale(args):
    """Rescale corrected images for every camera in the rescale config.

    Sets up logging for the dive, loads the correction configuration and
    camera system, then runs the rescale step for each configured camera.
    Quits with an error if no rescale configuration is present.
    """
    path = Path(args.path).resolve()

    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    log_file = get_processed_folder(path) / (
        "log/" + timestamp + "_correct_images_rescale.log")
    Console.set_logging_file(log_file)

    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix)

    # install freeimage plugins if not installed
    imageio.plugins.freeimage.download()

    if correct_config.camerarescale is None:
        Console.error("Camera rescale configuration not found")
        Console.error(
            "Please populate the correct_images.yaml file with a rescale configuration"
        )
        Console.quit("Malformed correct_images.yaml file")

    # obtain parameters for rescale from correct_config
    for camera in correct_config.camerarescale.rescale_cameras:
        corrections.rescale_camera(path, camerasystem, camera)
    Console.info("Rescaling completed for all cameras ...")
Example #3
    def set_path(self, path):
        """Set the path for the corrector"""
        # Resolve the three dive folders from the given path; callers
        # guarantee the path is non-empty.
        self.path_raw = get_raw_folder(path)
        self.path_processed = get_processed_folder(path)
        self.path_config = get_config_folder(path)

        self.mission = Mission(self.path_raw / "mission.yaml")

        # Reset user-specified image lists; overwritten on parse/process.
        self.user_specified_image_list = None
        self.user_specified_image_list_parse = None
        self.user_specified_image_list_process = None

        self.camera_name = self.camera.name
        # Adopt the topic of the first mission camera matching our name
        # that declares a topic, if any.
        match = next(
            (c for c in self.mission.image.cameras
             if c.name == self.camera_name and c.topic is not None),
            None,
        )
        if match is not None:
            self.camera.topic = match.topic
        self._type = self.camera.type
        # Load camera geometry: [height, width, channels].
        props = self.camera.image_properties
        self.image_height = props[0]
        self.image_width = props[1]
        self.image_channels = props[2]
def call_process(args):
    """Perform processing on source images using correction parameters
    generated in parse and outputs corrected images

    Parameters
    -----------
    args : parse_args object
        User provided arguments for path of source images
    """

    path = Path(args.path).resolve()

    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    Console.set_logging_file(
        get_processed_folder(path) /
        ("log/" + timestamp + "_correct_images_process.log"))

    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix)

    for camera in camerasystem.cameras:
        Console.info("Processing for camera", camera.name)

        # Guard clause: skip cameras that have no images.
        if not camera.image_list:
            Console.info(
                "No images found for the camera at the path provided...")
            continue
        corrector = Corrector(args.force, args.suffix, camera,
                              correct_config, path)
        if corrector.camera_found:
            corrector.process()
    Console.info("Process completed for all cameras...")
def call_process_data(args):
    """Run auv_nav processing for a dive.

    Configures logging, archives the auv_nav.yaml used for this run next
    to the log file (when present), and invokes the main process step
    with the user-supplied arguments.
    """
    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())

    Console.set_verbosity(args.verbose)
    processed = get_processed_folder(args.path)
    Console.set_logging_file(
        processed / ("log/" + timestamp + "_auv_nav_process.log"))
    auv_nav_path = get_config_folder(args.path) / "auv_nav.yaml"
    # Keep a timestamped copy of the configuration used for this run.
    if auv_nav_path.exists():
        auv_nav_path.copy(
            processed / ("log/" + timestamp + "_auv_nav.yaml"))
    process(
        args.path,
        args.force,
        args.start_datetime,
        args.end_datetime,
        args.relative_pose_uncertainty,
        args.start_image_identifier,
        args.end_image_identifier,
    )
Example #6
def find_navigation_csv(base,
                        distance_path="json_renav_*",
                        solution="ekf",
                        camera="Cam51707923"):
    """Locate the navigation CSV for a dive, quitting when none exists.

    Looks inside the first json_renav_* folder of the processed dive
    for csv/<solution>/auv_<solution>_<camera>.csv.
    """
    base_processed = get_processed_folder(base)
    json_list = list(base_processed.glob(distance_path))
    if not json_list:
        Console.quit("No navigation solution could be found at",
                     base_processed)
    candidate = (json_list[0] / ("csv/" + solution) /
                 ("auv_" + solution + "_" + camera + ".csv"))  # noqa
    if not candidate.exists():
        Console.quit("No navigation solution could be found at",
                     candidate)
    return candidate
Example #7
    def parse(self, path_list, correct_config_list):
        """Parse a list of dives and compute correction parameters.

        Each dive path is loaded with its index-matched configuration,
        accumulating a merged image list across dives. For colour
        correction, attenuation parameters are then computed once over
        the merged data and the resulting parameter files are copied
        into every dive's processed folder (skipping the dive that owns
        the originals).

        Parameters
        ----------
        path_list : list
            Dive paths; assumed valid and index-aligned with
            correct_config_list.
        correct_config_list : list
            Per-dive correction configurations.
        """
        # both path_list and correct_config_list are assumed to be valid + equivalent
        for i in range(len(path_list)):  # for each dive
            path = path_list[i]
            correct_config = correct_config_list[i]

            Console.info("Parsing dive:", path)
            # Console.info("Setting path...")
            self.set_path(path)  # update the dive path

            # Console.info("Loading configuration...")
            self.load_configuration(correct_config)  # load the dive config
            # Set the user specified list - if any
            self.user_specified_image_list = self.user_specified_image_list_parse
            # Update list of images by appending user-defined list
            # TODO: this list must be populated from AFTER loading the configuration and BEFORE getting image list
            self.get_imagelist()

        # Show the total number of images after filtering + merging the dives. It should match the sum of the filtered images of each dive.
        if len(path_list) > 1:
            Console.info(
                "Total number of images after merging dives:",
                len(self.camera_image_list),
            )

        Console.info("Output directories created / existing...")

        if self.correction_method == "colour_correction":
            # Attenuation parameters are computed once over the merged
            # image list, then replicated to every dive's folder below.
            self.get_altitude_and_depth_maps()
            self.generate_attenuation_correction_parameters()

            for i in range(len(path_list)):  # for each dive
                path = get_processed_folder(path_list[i])
                attn_dir = Path(self.attenuation_parameters_folder)
                relative_folder = attn_dir.relative_to(self.path_processed)
                dest_dir = path / relative_folder
                if dest_dir == attn_dir:
                    # Do not copy over the original files
                    continue
                copy_file_if_exists(self.attenuation_params_filepath, dest_dir)
                copy_file_if_exists(self.correction_gains_filepath, dest_dir)
                copy_file_if_exists(self.corrected_mean_filepath, dest_dir)
                copy_file_if_exists(self.corrected_std_filepath, dest_dir)
                copy_file_if_exists(self.raw_mean_filepath, dest_dir)
                copy_file_if_exists(self.raw_std_filepath, dest_dir)
        elif self.correction_method == "manual_balance":
            Console.info("run process for manual_balance...")
    def __init__(self, filepath, force_overwite=False, overwrite_all=False):
        """Initialise the calibrator for a dive.

        Loads calibration.yaml from the dive's config folder. When the
        file is missing, a default is generated there and the program
        quits so the user can edit it before re-running. The
        calibration output and configuration folders are created if
        needed.

        Parameters
        ----------
        filepath : str or Path
            Path of the dive; the raw folder is resolved from it.
        force_overwite : bool
            Overwrite existing calibration results. NOTE(review): the
            parameter name keeps the historical misspelling because
            callers may pass it by keyword.
        overwrite_all : bool
            Overwrite everything; implies force_overwite.
        """
        filepath = Path(filepath).resolve()
        self.filepath = get_raw_folder(filepath)
        # Short flags used throughout the class: fo = force overwrite,
        # foa = force overwrite all.
        self.fo = force_overwite
        self.foa = overwrite_all

        # Overwriting everything implies overwriting individual results.
        if self.foa:
            self.fo = True

        self.configuration_path = (get_config_folder(self.filepath.parent) /
                                   "calibration")
        calibration_config_file = self.configuration_path / "calibration.yaml"
        if calibration_config_file.exists():
            Console.info("Loading existing calibration.yaml at {}".format(
                calibration_config_file))
        else:
            # No configuration yet: copy the packaged default next to the
            # dive and ask the user to edit it before running again.
            root = Path(__file__).parents[1]
            default_file = root / "auv_cal/default_yaml" / "default_calibration.yaml"
            Console.warn("Cannot find {}, generating default from {}".format(
                calibration_config_file, default_file))
            # save localisation yaml to processed directory
            default_file.copy(calibration_config_file)

            Console.warn("Edit the file at: \n\t" +
                         str(calibration_config_file))
            Console.warn(
                "Try to use relative paths to the calibration datasets")
            Console.quit(
                "Modify the file calibration.yaml and run this code again.")

        with calibration_config_file.open("r") as stream:
            self.calibration_config = yaml.safe_load(stream)

        # Create the calibration folder at the same level as the dives
        self.output_path = get_processed_folder(
            self.filepath.parent) / "calibration"
        if not self.output_path.exists():
            self.output_path.mkdir(parents=True)
        if not self.configuration_path.exists():
            self.configuration_path.mkdir(parents=True)
    def mono(self):
        """Run monocular calibration for every configured camera.

        Cameras that already have a mono_<name>.yaml are skipped unless
        the force-overwrite (-F) flag was given.
        """
        for cam_cfg in self.calibration_config["cameras"]:
            cam_name = cam_cfg["name"]
            # Find if the calibration file exists
            calibration_file = (self.output_path /
                                str("mono_" + cam_name + ".yaml"))
            Console.info("Looking for a calibration file at " +
                         str(calibration_file))
            if calibration_file.exists() and not self.fo:
                Console.warn(
                    "The camera " + cam_cfg["name"] +
                    " has already been calibrated. If you want to overwrite " +
                    "the JSON, use the -F flag.")
                continue
            Console.info(
                "The camera is not calibrated, running mono calibration..."
            )
            filepaths = build_filepath(
                get_processed_folder(self.filepath),
                cam_cfg["camera_calibration"]["path"],
            )

            if "glob_pattern" not in cam_cfg["camera_calibration"]:
                Console.error(
                    "Could not find the key glob_pattern for the camera ",
                    cam_cfg["name"],
                )
                Console.quit("glob_pattern expected in calibration.yaml")

            calibrate_mono(
                cam_name,
                filepaths,
                str(cam_cfg["camera_calibration"]["glob_pattern"]),
                self.calibration_config["camera_calibration"],
                calibration_file,
                self.fo,
                self.foa,
            )
Example #10
def parse(filepath, force_overwrite, merge):
    """Parse one or more dives and optionally merge their navigation.

    Parameters
    ----------
    filepath : list
        Dive folder paths; each is parsed individually first.
    force_overwrite : bool
        Overwrite previously parsed outputs.
    merge : bool
        When True and more than one dive is given, merge the per-dive
        nav_standard.json files into a single interlaced output folder
        named after the first and last dive dates.
    """
    # Filepath is a list. Parse every dive first.
    for p in filepath:
        parse_single(p, force_overwrite)

    # Guard clause: nothing to merge.
    if not merge or len(filepath) <= 1:
        return

    Console.info("Merging the dives...")

    # Collect per-dive start datetimes (encoded in the folder name as
    # YYYYMMDD_HHMMSS) and the per-dive nav_standard.json files.
    dates = []
    json_files = []
    for p in filepath:
        folder_name = Path(p).name
        yyyy = int(folder_name[0:4])
        mm = int(folder_name[4:6])
        dd = int(folder_name[6:8])
        hh = int(folder_name[9:11])
        mm1 = int(folder_name[11:13])
        ss = int(folder_name[13:15])
        dates.append(datetime(yyyy, mm, dd, hh, mm1, ss))

        outpath = get_processed_folder(p)
        json_files.append(outpath / "nav/nav_standard.json")

    data_list = merge_json_files(json_files)

    # Name the merged folder after the earliest and latest dive dates.
    dates.sort()
    foldername = (dates[0].strftime("%Y%m%d%H%M%S") + "_" +
                  dates[-1].strftime("%Y%m%d%H%M%S") + "_merged")
    processed_path = get_processed_folder(filepath[0]).parent
    nav_folder = processed_path / foldername / "nav"
    # make output path if not exist (best effort: warn, don't abort)
    if not nav_folder.exists():
        try:
            nav_folder.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            print("Warning:", e)

    # create file (overwrite if exists); the context manager closes it,
    # so no explicit close() is needed.
    filename = "nav_standard.json"
    nav_file = nav_folder / filename
    Console.info("Writing the output to:", str(nav_file))
    with nav_file.open("w") as fileout:
        json.dump(data_list, fileout, indent=2)

    # copy mission.yaml and vehicle.yaml to processed folder for
    # process step
    mission_processed = get_processed_folder(filepath[0]) / "mission.yaml"
    vehicle_processed = get_processed_folder(filepath[0]) / "vehicle.yaml"
    mission_merged = processed_path / foldername / "mission.yaml"
    vehicle_merged = processed_path / foldername / "vehicle.yaml"
    mission_processed.copy(mission_merged)
    vehicle_processed.copy(vehicle_merged)

    # interlace the data based on timestamps
    Console.info("Interlacing merged data...")
    parse_interlacer(nav_folder, filename)
    Console.info(
        "...done interlacing merged data. Output saved to",
        nav_folder / filename,
    )
    plot_parse_data(nav_folder)
    Console.info("Complete merging data")
Example #11
def _tidal_offset(results, tide_list):
    """Subtract the interpolated tide height from DEPTH/USBL records in place.

    Walks the (time-sorted) tide_list alongside the results, linearly
    interpolating tide height at each result timestamp and subtracting
    it from the depth field of the matching record type.
    """
    j = 0
    for k in range(len(results)):
        # Advance the tide cursor until it passes the result timestamp.
        while (j < len(tide_list)
               and tide_list[j]["epoch_timestamp"] <
               results[k]["epoch_timestamp"]):
            j = j + 1

        if j >= 1:
            _result = interpolate(
                results[k]["epoch_timestamp"],
                tide_list[j - 1]["epoch_timestamp"],
                tide_list[j]["epoch_timestamp"],
                tide_list[j - 1]["data"][0]["height"],
                tide_list[j]["data"][0]["height"],
            )
            if results[0]["category"] == Category.DEPTH:
                results[k]["data"][0]["depth"] = (
                    results[k]["data"][0]["depth"] - _result)
            elif results[0]["category"] == Category.USBL:
                results[k]["data_target"][4]["depth"] = (
                    results[k]["data_target"][4]["depth"] - _result)


def parse_single(filepath, force_overwrite):
    """Parse the raw data of a single dive into nav/nav_standard.json.

    Loads mission.yaml and vehicle.yaml, dispatches every configured
    sensor stream to its format-specific parser in a worker pool,
    applies BioCam timestamp correction and tidal depth correction
    where relevant, then writes and interlaces the combined data list.

    Parameters
    ----------
    filepath : str or Path
        Path of the dive; the raw folder is resolved from it.
    force_overwrite : bool
        When False, skip the dive if parsed outputs already exist.
    """
    # initiate data and processing flags
    filepath = Path(filepath).resolve()
    filepath = get_raw_folder(filepath)

    if not force_overwrite:
        existing_files = check_output_files_exist(
            get_processed_folder(filepath))
        if existing_files:
            msg = ("It looks like this dataset has already been parsed.\n" +
                   "The following file(s) already exist:\n" + existing_files +
                   "If you would like auv_nav to overwrite existing file," +
                   " rerun it with the flag -F.\n" +
                   "Example:   auv_nav parse -F PATH")
            Console.warn(msg)
            Console.warn("Dataset skipped")
            return

    ftype = "oplab"

    # load mission.yaml and vehicle.yaml config files
    mission_file = get_raw_folder(filepath / "mission.yaml")
    vehicle_file = get_raw_folder(filepath / "vehicle.yaml")
    Console.info("Loading mission.yaml at", mission_file)
    mission = Mission(mission_file)

    Console.info("Loading vehicle.yaml at", vehicle_file)
    vehicle = Vehicle(vehicle_file)

    # copy mission.yaml and vehicle.yaml to processed folder for process step
    mission_processed = get_processed_folder(mission_file)
    vehicle_processed = get_processed_folder(vehicle_file)

    # Write mission with metadata (username, date and hostname)
    mission.write(mission_processed)
    vehicle.write(vehicle_processed)

    # check for recognised formats and create nav file
    outpath = get_processed_folder(filepath) / "nav"
    filename = "nav_standard.json"

    # make file path if not exist (best effort: warn, don't abort)
    if not outpath.is_dir():
        try:
            outpath.mkdir()
        except Exception as e:
            print("Warning:", e)

    Console.info("Loading raw data...")

    # Keep two cores free for the rest of the system, but use at least one.
    if multiprocessing.cpu_count() < 4:
        cpu_to_use = 1
    else:
        cpu_to_use = multiprocessing.cpu_count() - 2

    try:
        pool = multiprocessing.Pool(cpu_to_use)
    except AttributeError as e:
        print(
            "Error: ",
            e,
            "\n===============\nThis error is known to \
                happen when running the code more than once from the same \
                console in Spyder. Please run the code from a new console \
                to prevent this error from happening. You may close the \
                current console.\n==============",
        )
    pool_list = []

    def _dispatch(sensor, label, category, parsers):
        """Queue the parser matching sensor.format, or quit if unsupported.

        Does nothing when the sensor is not configured (empty).
        """
        if sensor.empty():
            return
        parser = parsers.get(sensor.format)
        if parser is None:
            Console.quit("Mission " + label + " format", sensor.format,
                         "not supported.")
        pool_list.append(
            pool.apply_async(parser,
                             [mission, vehicle, category, ftype, outpath]))

    # Format -> parser dispatch tables. NOTE: the original if/elif chains
    # compared mission.usbl.format (instead of the sensor's own format)
    # for the "rosbag" case of velocity/orientation/depth/altitude; that
    # copy-paste bug is fixed here by keying on each sensor's own format.
    _dispatch(
        mission.image, "image", "images", {
            "acfr_standard": parse_acfr_images,
            "unagi": parse_acfr_images,
            "seaxerocks_3": parse_seaxerocks_images,
            "biocam": parse_biocam_images,
            "biocam4000_15c": parse_biocam_images,
            "ntnu_stereo": parse_ntnu_stereo_images,
            "rosbag_extracted_images": parse_rosbag_extracted_images,
            "rosbag": parse_rosbag,
        })
    _dispatch(
        mission.usbl, "usbl", "usbl", {
            "gaps": parse_gaps,
            "usbl_dump": parse_usbl_dump,
            "NOC_nmea": parse_NOC_nmea,
            "eiva_navipac": parse_eiva_navipac,
            "rosbag": parse_rosbag,
        })
    _dispatch(
        mission.velocity, "velocity", "velocity", {
            "phins": parse_phins,
            "ae2000": parse_ae2000,
            "alr": parse_alr,
            "autosub": parse_autosub,
            "rdi": parse_rdi,
            "ntnu_dvl": parse_ntnu_dvl,
            "rosbag": parse_rosbag,
        })
    _dispatch(
        mission.orientation, "orientation", "orientation", {
            "phins": parse_phins,
            "ae2000": parse_ae2000,
            "alr": parse_alr,
            "autosub": parse_autosub,
            "rdi": parse_rdi,
            "eiva_navipac": parse_eiva_navipac,
            "rosbag": parse_rosbag,
        })
    _dispatch(
        mission.depth, "depth", "depth", {
            "phins": parse_phins,
            "ae2000": parse_ae2000,
            "alr": parse_alr,
            "autosub": parse_autosub,
            "gaps": parse_gaps,
            "eiva_navipac": parse_eiva_navipac,
            "rosbag": parse_rosbag,
        })
    _dispatch(
        mission.altitude, "altitude", "altitude", {
            "phins": parse_phins,
            "ae2000": parse_ae2000,
            "alr": parse_alr,
            "autosub": parse_autosub,
            "rdi": parse_rdi,
            "ntnu_dvl": parse_ntnu_dvl,
            "rosbag": parse_rosbag,
        })

    # Tide is parsed synchronously because its list is needed below for
    # the depth/USBL tidal correction.
    if not mission.tide.empty():
        if mission.tide.format == "NOC_polpred":
            tide_list = parse_NOC_polpred(mission, vehicle, "tide", ftype,
                                          outpath)
        else:
            Console.quit("Mission tide format", mission.tide.format,
                         "not supported.")
    else:
        tide_list = None

    pool.close()
    pool.join()

    Console.info("...done loading raw data.")
    Console.info("Compile data list...")

    # Seed the combined list with the mission origin record.
    data_list = [[{
        "epoch_timestamp": 0.0,
        "class": "origin",
        "category": "origin",
        "data": [{
            "latitude": mission.origin.latitude,
            "longitude": mission.origin.longitude,
            "crs": mission.origin.crs,
            "date": mission.origin.date,
        }],
    }]]

    # Set advance True/False flag so not comparing for every i in pool_list
    correcting_timestamps = mission.image.format == "biocam"

    for job in pool_list:
        results = job.get()
        if len(results) < 1:
            continue
        if results[0] is None:
            Console.warn(
                "Some results are empty. Please check whether this is correct or not"
            )
            continue
        if correcting_timestamps and results[0]["category"] == "image":
            Console.info("Correction of BioCam cpu timestamps...")
            results = correct_timestamps(results)
        # If current retrieved data is DEPTH or USBL and TIDE data is
        # available, offset depth to acknowledge for tides.
        if (results[0]["category"] == Category.DEPTH
                or results[0]["category"] == Category.USBL):
            if not mission.tide.empty():
                Console.info("Tidal correction of depth vector...")
                _tidal_offset(results, tide_list)
        data_list.append(results)

    Console.info("...done compiling data list.")

    Console.info("Writing to output file...")
    data_list_temp = []
    for sublist in data_list:
        data_list_temp += sublist

    # create file (overwrite if exists); the context manager closes it,
    # so no explicit close() is needed.
    nav_file = outpath / filename
    with nav_file.open("w") as fileout:
        json.dump(data_list_temp, fileout, indent=2)
    Console.info("...done writing to output file.")

    # Release the large intermediate lists before interlacing.
    del data_list_temp
    del data_list

    # interlace the data based on timestamps
    Console.info("Interlacing data...")
    parse_interlacer(outpath, filename)
    Console.info("...done interlacing data. Output saved to",
                 outpath / filename)
    plot_parse_data(outpath, ftype)
    Console.info("Complete parse data")
Example #12
def acfr_to_oplab(args):
    """Convert ACFR navigation outputs to oplab CSV files.

    Converts a vehicle pose estimate file to a dead-reckoning CSV and/or a
    stereo pose estimate file to per-camera CSVs. If a dive folder is also
    provided alongside a stereo pose file, the ACFR stereo navigation is
    interpolated to the laser image timestamps found in the dive's
    nav_standard.json and written to auv_acfr_laser.csv.

    Existing output files are never overwritten unless ``args.force`` is set.
    """
    csv_filepath = Path(args.output_folder)
    force_overwrite = args.force
    if not args.vehicle_pose and not args.stereo_pose:
        Console.error("Please provide at least one file")
        Console.quit("Missing comandline arguments")
    if args.vehicle_pose:
        Console.info("Vehicle pose provided! Converting it to DR CSV...")
        vpp = AcfrVehiclePoseParser(args.vehicle_pose)
        dr = vpp.get_dead_reckoning()
        csv_filename = "auv_acfr_centre.csv"

        if (csv_filepath / csv_filename).exists() and not force_overwrite:
            Console.error(
                "The DR file already exists. Use -F to force overwrite it")
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, dr, csv_filename, csv_flag=True)
            Console.info("Output vehicle pose written to",
                         csv_filepath / csv_filename)
    if args.stereo_pose:
        Console.info("Stereo pose provided! Converting it to camera CSVs...")
        spp = AcfrStereoPoseParser(args.stereo_pose)
        cam1, cam2 = spp.get_cameras()
        if (csv_filepath / "auv_dr_LC.csv").exists() and not force_overwrite:
            Console.error(
                "The camera files already exists. Use -F to force overwrite it"
            )
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, cam1, "auv_acfr_LC", csv_flag=True)
            write_csv(csv_filepath, cam2, "auv_acfr_RC", csv_flag=True)
            Console.info("Output camera files written at", csv_filepath)

        if args.dive_folder is None:
            return

        # If the user provides a dive path, we can interpolate the ACFR
        # navigation to the laser timestamps
        path_processed = get_processed_folder(args.dive_folder)
        nav_standard_file = path_processed / "nav" / "nav_standard.json"
        nav_standard_file = get_processed_folder(nav_standard_file)
        Console.info("Loading json file {}".format(nav_standard_file))

        start_datetime = ""
        finish_datetime = ""

        file3 = csv_filepath / "auv_acfr_laser.csv"
        if file3.exists() and not force_overwrite:
            Console.error(
                "The laser file already exists. Use -F to force overwrite it")
            Console.quit("Default behaviour: not to overwrite results")

        with nav_standard_file.open("r") as nav_standard:
            parsed_json_data = json.load(nav_standard)

        # Default the time span to the full range of the navigation data
        if start_datetime == "":
            epoch_start_time = epoch_from_json(parsed_json_data[1])
            start_datetime = epoch_to_datetime(epoch_start_time)
        else:
            epoch_start_time = string_to_epoch(start_datetime)
        if finish_datetime == "":
            epoch_finish_time = epoch_from_json(parsed_json_data[-1])
            finish_datetime = epoch_to_datetime(epoch_finish_time)
        else:
            epoch_finish_time = string_to_epoch(finish_datetime)

        Console.info("Interpolating laser to ACFR stereo pose data...")
        # Context manager guarantees the output file is closed (the original
        # left it open for the lifetime of the process)
        with file3.open("w") as fileout3:
            fileout3.write(cam1[0].get_csv_header())
            for i in range(len(parsed_json_data)):
                Console.progress(i, len(parsed_json_data))
                epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
                if (epoch_timestamp >= epoch_start_time
                        and epoch_timestamp <= epoch_finish_time):
                    if "laser" in parsed_json_data[i]["category"]:
                        filename = parsed_json_data[i]["filename"]
                        c3_interp = interpolate_camera(epoch_timestamp, cam1,
                                                       filename)
                        fileout3.write(c3_interp.to_csv_row())
        Console.info("Done! Laser file available at", str(file3))
Beispiel #13
0
def hybis_to_oplab(args):
    """Convert HyBIS navigation and imagery to an oplab dead-reckoning CSV.

    Validates the required command-line arguments, parses the HyBIS
    navigation, creates the ``json_renav_<start>_<finish>`` output folder
    structure under the processed path and writes the DR CSV for the HyBIS
    camera. Refuses to overwrite an existing renav folder unless
    ``args.force`` is set.
    """
    if args.navigation_file is None:
        Console.error("Please provide a navigation file")
        Console.quit("Missing comandline arguments")
    if args.image_path is None:
        Console.error("Please provide an image path")
        Console.quit("Missing comandline arguments")
    if args.reference_lat is None:
        Console.error("Please provide a reference_lat")
        Console.quit("Missing comandline arguments")
    if args.reference_lon is None:
        Console.error("Please provide a reference_lon")
        Console.quit("Missing comandline arguments")
    if args.output_folder is None:
        Console.error("Please provide an output path")
        Console.quit("Missing comandline arguments")
    parser = HybisParser(
        args.navigation_file,
        args.image_path,
        args.reference_lat,
        args.reference_lon,
    )
    force_overwrite = args.force
    filepath = get_processed_folder(args.output_folder)
    start_datetime = epoch_to_datetime(parser.start_epoch)
    finish_datetime = epoch_to_datetime(parser.finish_epoch)

    # make path for processed outputs, named after the dive time span
    json_filename = ("json_renav_" + start_datetime[0:8] + "_" +
                     start_datetime[8:14] + "_" + finish_datetime[0:8] + "_" +
                     finish_datetime[8:14])
    renavpath = filepath / json_filename
    if not renavpath.is_dir():
        try:
            renavpath.mkdir()
        except Exception as e:
            print("Warning:", e)
    elif renavpath.is_dir() and not force_overwrite:
        # Check if dataset has already been processed
        Console.error(
            "It looks like this dataset has already been processed for the",
            "specified time span.",
        )
        Console.error(
            "The following directory already exist: {}".format(renavpath))
        Console.error(
            "To overwrite the contents of this directory rerun auv_nav with",
            "the flag -F.",
        )
        Console.error("Example:   auv_nav process -F PATH")
        Console.quit("auv_nav process would overwrite json_renav files")

    output_dr_centre_path = renavpath / "csv" / "dead_reckoning"
    if not output_dr_centre_path.exists():
        output_dr_centre_path.mkdir(parents=True)
    camera_name = "hybis_camera"
    output_dr_centre_path = output_dr_centre_path / ("auv_dr_" + camera_name +
                                                     ".csv")
    # Fix: the original called parser.write() twice, writing the same
    # output file a second time for no reason
    parser.write(output_dr_centre_path)
    Console.info("Output written to", output_dr_centre_path)
Beispiel #14
0
def oplab_to_acfr(args):
    """Export an oplab processed dive to the ACFR format.

    Writes ``vehicle_pose_est.data`` and ``stereo_pose_est.data`` from the
    dive's EKF navigation CSVs, then generates a combined.RAW file by
    replaying the sensor records of ``nav_standard.json`` through an
    AcfrCombinedRawWriter. Requires two cameras in the mission file for the
    stereo export. Existing outputs are only overwritten with ``args.force``.
    """
    if not args.dive_folder:
        Console.error("No dive folder provided. Exiting...")
        Console.quit("Missing comandline arguments")
    if not args.output_folder:
        Console.error("No output folder provided. Exiting...")
        Console.quit("Missing comandline arguments")

    output_folder = get_processed_folder(args.output_folder)
    vf = output_folder / "vehicle_pose_est.data"
    sf = output_folder / "stereo_pose_est.data"
    if (vf.exists() or sf.exists()) and not args.force:
        Console.error(
            "Output folder already exists. Use -F to overwrite. Exiting...")
        Console.quit("Default behaviour: not to overwrite results")

    Console.info("Loading mission.yaml")
    path_processed = get_processed_folder(args.dive_folder)
    mission_file = path_processed / "mission.yaml"
    mission = Mission(mission_file)

    vehicle_file = path_processed / "vehicle.yaml"
    vehicle = Vehicle(vehicle_file)

    origin_lat = mission.origin.latitude
    origin_lon = mission.origin.longitude

    json_list = list(path_processed.glob("json_*"))
    if len(json_list) == 0:
        Console.quit(
            "No navigation solution could be found. Please run ",
            "auv_nav parse and process first",
        )
    # Fix: Path.glob() already returns paths prefixed with path_processed,
    # so use the result directly. Joining it onto path_processed again
    # duplicates the prefix whenever the path is relative.
    navigation_path = json_list[0]
    # Try if ekf exists:
    full_navigation_path = navigation_path / "csv" / "ekf"

    vpw = AcfrVehiclePoseWriter(vf, origin_lat, origin_lon)
    vehicle_navigation_file = full_navigation_path / "auv_ekf_centre.csv"
    dataframe = pd.read_csv(vehicle_navigation_file)
    dr_list = []
    for _, row in dataframe.iterrows():
        sb = SyncedOrientationBodyVelocity()
        sb.from_df(row)
        dr_list.append(sb)
    vpw.write(dr_list)
    Console.info("Vehicle pose written to", vf)

    if not mission.image.cameras:
        Console.warn("No cameras found in the mission file.")
        return
    if len(mission.image.cameras) == 1:
        Console.error("Only one camera found in the mission file. Exiting...")
        Console.quit("ACFR stereo pose est requires two cameras.")

    camera0_name = mission.image.cameras[0].name
    camera1_name = mission.image.cameras[1].name

    camera0_nav_file = full_navigation_path / ("auv_ekf_" + camera0_name +
                                               ".csv")
    dataframe = pd.read_csv(camera0_nav_file)
    cam1_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam1_list.append(sb)

    camera1_nav_file = full_navigation_path / ("auv_ekf_" + camera1_name +
                                               ".csv")

    dataframe = pd.read_csv(camera1_nav_file)
    cam2_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam2_list.append(sb)

    spw = AcfrStereoPoseWriter(sf, origin_lat, origin_lon)

    spw.write(cam1_list, cam2_list)
    Console.info("Stereo pose written to", sf)

    Console.info("Generating combined.RAW")
    nav_standard_file = path_processed / "nav" / "nav_standard.json"
    nav_standard_file = get_processed_folder(nav_standard_file)
    Console.info("Loading json file {}".format(nav_standard_file))

    with nav_standard_file.open("r") as nav_standard:
        parsed_json_data = json.load(nav_standard)

    start_datetime = ""
    finish_datetime = ""

    # setup start and finish date time (defaults to the full data range)
    if start_datetime == "":
        epoch_start_time = epoch_from_json(parsed_json_data[1])
        start_datetime = epoch_to_datetime(epoch_start_time)
    else:
        epoch_start_time = string_to_epoch(start_datetime)
    if finish_datetime == "":
        epoch_finish_time = epoch_from_json(parsed_json_data[-1])
        finish_datetime = epoch_to_datetime(epoch_finish_time)
    else:
        epoch_finish_time = string_to_epoch(finish_datetime)

    sensors_std = {
        "usbl": {
            "model": "sensor"
        },
        "dvl": {
            "model": "sensor"
        },
        "depth": {
            "model": "sensor"
        },
        "orientation": {
            "model": "sensor"
        },
    }

    exporter = AcfrCombinedRawWriter(mission, vehicle, output_folder)

    # read in data from json file
    # i here is the number of the data packet
    for i in range(len(parsed_json_data)):
        Console.progress(i, len(parsed_json_data))
        epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
        if (epoch_timestamp >= epoch_start_time
                and epoch_timestamp <= epoch_finish_time):
            if "velocity" in parsed_json_data[i]["category"]:
                if "body" in parsed_json_data[i]["frame"]:
                    # to check for corrupted data point which have inertial
                    # frame data values
                    if "epoch_timestamp_dvl" in parsed_json_data[i]:
                        # confirm time stamps of dvl are aligned with main
                        # clock (within a second)
                        if (abs(parsed_json_data[i]["epoch_timestamp"] -
                                parsed_json_data[i]["epoch_timestamp_dvl"])
                            ) < 1.0:
                            velocity_body = BodyVelocity()
                            velocity_body.from_json(parsed_json_data[i],
                                                    sensors_std["dvl"])
                            exporter.add(velocity_body)
                if "inertial" in parsed_json_data[i]["frame"]:
                    velocity_inertial = InertialVelocity()
                    velocity_inertial.from_json(parsed_json_data[i])
                    exporter.add(velocity_inertial)

            if "orientation" in parsed_json_data[i]["category"]:
                orientation = Orientation()
                orientation.from_json(parsed_json_data[i],
                                      sensors_std["orientation"])
                exporter.add(orientation)

            if "depth" in parsed_json_data[i]["category"]:
                depth = Depth()
                depth.from_json(parsed_json_data[i], sensors_std["depth"])
                exporter.add(depth)

            if "altitude" in parsed_json_data[i]["category"]:
                altitude = Altitude()
                altitude.from_json(parsed_json_data[i])
                exporter.add(altitude)

            if "usbl" in parsed_json_data[i]["category"]:
                usbl = Usbl()
                usbl.from_json(parsed_json_data[i], sensors_std["usbl"])
                exporter.add(usbl)

            if "image" in parsed_json_data[i]["category"]:
                camera1 = Camera()
                # LC
                camera1.from_json(parsed_json_data[i], "camera1")
                exporter.add(camera1)
                camera2 = Camera()
                camera2.from_json(parsed_json_data[i], "camera2")
                exporter.add(camera2)
    def stereo(self):
        """Calibrate the stereo camera pair(s) defined in the config.

        Calibrates cameras 0+1 and, when a third camera is configured,
        cameras 0+2 (the latter in both directions). Missing monocular
        calibrations are generated first via self.mono(). Existing stereo
        calibrations are only overwritten when self.fo is set.
        """
        if len(self.calibration_config["cameras"]) > 1:
            c0 = self.calibration_config["cameras"][0]
            c1 = self.calibration_config["cameras"][1]
            calibration_file = self.output_path / str("stereo_" + c0["name"] +
                                                      "_" + c1["name"] +
                                                      ".yaml")
            Console.info("Looking for a calibration file at " +
                         str(calibration_file))
            if calibration_file.exists() and not self.fo:
                Console.quit(
                    "The stereo pair " + c0["name"] + "_" + c1["name"] +
                    " has already been calibrated. If you want to overwrite " +
                    "the calibration, use the -F flag.")
            Console.info(
                "The stereo camera is not calibrated, running stereo",
                "calibration...",
            )

            left_filepaths = build_filepath(
                get_processed_folder(self.filepath),
                c0["camera_calibration"]["path"],
            )
            right_filepaths = build_filepath(
                get_processed_folder(self.filepath),
                c1["camera_calibration"]["path"],
            )
            left_name = c0["name"]
            if "glob_pattern" not in c0["camera_calibration"]:
                Console.error(
                    "Could not find the key glob_pattern for the camera ",
                    c0["name"],
                )
                Console.quit("glob_pattern expected in calibration.yaml")
            left_extension = str(c0["camera_calibration"]["glob_pattern"])
            right_name = c1["name"]
            if "glob_pattern" not in c1["camera_calibration"]:
                Console.error(
                    "Could not find the key glob_pattern for the camera ",
                    c1["name"],
                )
                Console.quit("glob_pattern expected in calibration.yaml")
            right_extension = str(c1["camera_calibration"]["glob_pattern"])
            left_calibration_file = self.output_path / str("mono_" +
                                                           left_name + ".yaml")
            right_calibration_file = self.output_path / str("mono_" +
                                                            right_name +
                                                            ".yaml")
            # Generate any missing monocular calibration before stereo
            if (not left_calibration_file.exists()
                    or not right_calibration_file.exists()):
                if not left_calibration_file.exists():
                    Console.warn(
                        "Could not find a monocular calibration file " +
                        str(left_calibration_file) + "...")
                if not right_calibration_file.exists():
                    Console.warn(
                        "Could not find a monocular calibration file " +
                        str(right_calibration_file) + "...")
                self.mono()
            if left_calibration_file.exists(
            ) and right_calibration_file.exists():
                Console.info("Loading previous monocular calibrations at \
                                \n\t * {}\n\t * {}".format(
                    str(left_calibration_file), str(right_calibration_file)))
            calibrate_stereo(
                left_name,
                left_filepaths,
                left_extension,
                left_calibration_file,
                right_name,
                right_filepaths,
                right_extension,
                right_calibration_file,
                self.calibration_config["camera_calibration"],
                calibration_file,
                self.fo,
                self.foa,
            )
        # Check for a second stereo pair
        if len(self.calibration_config["cameras"]) > 2:
            c0 = self.calibration_config["cameras"][0]
            c2 = self.calibration_config["cameras"][2]
            calibration_file = self.output_path / str("stereo_" + c0["name"] +
                                                      "_" + c2["name"] +
                                                      ".yaml")
            Console.info("Looking for a calibration file at " +
                         str(calibration_file))
            if calibration_file.exists() and not self.fo:
                Console.quit(
                    "The stereo pair " + c0["name"] + "_" + c2["name"] +
                    " has already been calibrated. If you want to overwrite " +
                    "the calibration, use the -F flag.")
            Console.info(
                "The stereo camera is not calibrated, running stereo",
                "calibration...",
            )
            left_name = c0["name"]
            left_filepaths = build_filepath(
                get_processed_folder(self.filepath),
                c0["camera_calibration"]["path"],
            )
            # Consistency: validate glob_pattern here too, mirroring the
            # first stereo pair (previously a missing key raised KeyError)
            if "glob_pattern" not in c0["camera_calibration"]:
                Console.error(
                    "Could not find the key glob_pattern for the camera ",
                    c0["name"],
                )
                Console.quit("glob_pattern expected in calibration.yaml")
            left_extension = str(c0["camera_calibration"]["glob_pattern"])
            right_name = c2["name"]
            # Fix: resolve against the processed folder, consistent with
            # every other build_filepath call in this method (previously
            # used the raw self.filepath directly)
            right_filepaths = build_filepath(
                get_processed_folder(self.filepath),
                c2["camera_calibration"]["path"],
            )
            if "glob_pattern" not in c2["camera_calibration"]:
                Console.error(
                    "Could not find the key glob_pattern for the camera ",
                    c2["name"],
                )
                Console.quit("glob_pattern expected in calibration.yaml")
            right_extension = str(c2["camera_calibration"]["glob_pattern"])
            left_calibration_file = self.output_path / str("mono_" +
                                                           left_name + ".yaml")
            right_calibration_file = self.output_path / str("mono_" +
                                                            right_name +
                                                            ".yaml")
            # Generate any missing monocular calibration before stereo
            if (not left_calibration_file.exists()
                    or not right_calibration_file.exists()):
                if not left_calibration_file.exists():
                    Console.warn(
                        "Could not find a monocular calibration file " +
                        str(left_calibration_file) + "...")
                if not right_calibration_file.exists():
                    Console.warn(
                        "Could not find a monocular calibration file " +
                        str(right_calibration_file) + "...")
                self.mono()
            if left_calibration_file.exists(
            ) and right_calibration_file.exists():
                Console.info("Loading previous monocular calibrations at \
                                \n\t * {}\n\t * {}".format(
                    str(left_calibration_file), str(right_calibration_file)))
            calibrate_stereo(
                left_name,
                left_filepaths,
                left_extension,
                left_calibration_file,
                right_name,
                right_filepaths,
                right_extension,
                right_calibration_file,
                self.calibration_config["camera_calibration"],
                calibration_file,
                self.fo,
                self.foa,
            )

            # Also calibrate the reversed pair (camera 2 as reference)
            calibration_file = self.output_path / str("stereo_" + c2["name"] +
                                                      "_" + c0["name"] +
                                                      ".yaml")

            calibrate_stereo(
                right_name,
                right_filepaths,
                right_extension,
                right_calibration_file,
                left_name,
                left_filepaths,
                left_extension,
                left_calibration_file,
                self.calibration_config["camera_calibration"],
                calibration_file,
                self.fo,
                self.foa,
            )
def call_parse_data(args):
    """Configure a timestamped log file and run the navigation parser."""
    stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    log_name = "log/" + stamp + "_auv_nav_parse.log"
    Console.set_logging_file(get_processed_folder(args.path[0]) / log_name)
    parse(args.path, args.force, args.merge)
    def laser_imp(self, c0, c1):
        """Calibrate the laser planes seen by cameras c1 (laser) and c0.

        Skips the work when a calibration already exists and overwriting
        (self.fo) was not requested. Runs a stereo calibration first if the
        required stereo calibration file is missing.
        """
        main_camera_name = c1["name"]
        top_plane_file = self.output_path / (
            f"laser_calibration_top_{main_camera_name}.yaml")
        bottom_plane_file = self.output_path / (
            f"laser_calibration_bottom_{main_camera_name}.yaml")
        Console.info(f"Looking for a calibration file at {top_plane_file}")
        if top_plane_file.exists() and not self.fo:
            Console.quit(
                f"The laser planes from cameras {c1['name']} and {c0['name']}"
                " have already been calibrated. If you want to overwite the "
                "calibration, use the -F flag.")
        Console.info(
            "The laser planes are not calibrated, running laser calibration..."
        )

        # Check if the stereo pair has already been calibrated
        stereo_calib_file = self.output_path / (
            f"stereo_{c1['name']}_{c0['name']}.yaml")
        if not stereo_calib_file.exists():
            Console.warn("Could not find a stereo calibration file "
                         f"{stereo_calib_file}...")
            self.stereo()

        def _locate(folder):
            # Fall back to the processed folder when raw images are absent
            if folder.exists():
                return folder
            folder = get_processed_folder(folder)
            if not folder.exists():
                Console.quit(
                    "Could not find stereo image folder, neither in raw nor "
                    f"in processed folder ({folder}).")
            return folder

        fixed_cam_name = c0["name"]
        fixed_cam_dir = get_raw_folder(self.filepath) / str(
            c0["laser_calibration"]["path"])
        fixed_cam_glob = str(c0["laser_calibration"]["glob_pattern"])
        laser_cam_name = c1["name"]
        laser_cam_dir = get_raw_folder(self.filepath) / str(
            c1["laser_calibration"]["path"])
        laser_cam_glob = str(c1["laser_calibration"]["glob_pattern"])
        fixed_cam_dir = _locate(fixed_cam_dir).resolve()
        laser_cam_dir = _locate(laser_cam_dir).resolve()
        Console.info("Reading stereo images of laser line from "
                     f"{fixed_cam_dir} and {laser_cam_dir}")
        self.calibration_config.setdefault("skip_first", 0)
        calibrate_laser(
            laser_cam_name,
            laser_cam_dir,
            laser_cam_glob,
            fixed_cam_name,
            fixed_cam_dir,
            fixed_cam_glob,
            stereo_calib_file,
            self.calibration_config["laser_calibration"],
            top_plane_file,
            bottom_plane_file,
            self.calibration_config["skip_first"],
            self.fo,
            self.foa,
        )
def rescale_camera(path, camera_system, camera):
    """Rescale the images of one camera to a target pixel size.

    Looks up *camera* in the camera system, loads its monocular calibration
    and EKF altitude/distance CSV, then rescales every .jpg/.png/.tif image
    found in the camera's image path into the configured output folder.

    Returns:
        bool: True on success, False if the camera is not present in the
        camera.yaml configuration.
    """
    name = camera.camera_name
    distance_path = camera.distance_path
    interpolate_method = camera.interpolate_method
    image_path = camera.path
    target_pixel_size = camera.target_pixel_size
    maintain_pixels = bool(camera.maintain_pixels)
    output_folder = camera.output_folder

    # Only membership matters here, so test with any() instead of building
    # an index list (whose loop variable also shadowed the `camera` param)
    if any(cam.name == name for cam in camera_system.cameras):
        Console.info("Camera found in camera.yaml file...")
    else:
        Console.warn(
            "Camera not found in camera.yaml file. Please provide a relevant \
            camera.yaml file...")
        return False

    # obtain images to be rescaled
    path_processed = get_processed_folder(path)
    image_path = path_processed / image_path

    # obtain path to distance / altitude values
    full_distance_path = path_processed / distance_path
    full_distance_path = full_distance_path / "csv" / "ekf"
    distance_file = "auv_ekf_" + name + ".csv"
    distance_path = full_distance_path / distance_file

    # obtain focal lengths from calibration file
    camera_params_folder = path_processed / "calibration"
    camera_params_filename = "mono_" + name + ".yaml"
    camera_params_file_path = camera_params_folder / camera_params_filename

    if not camera_params_file_path.exists():
        Console.quit("Calibration file not found...")
    else:
        Console.info("Calibration file found...")

    monocam = MonoCamera(camera_params_file_path)
    focal_length_x = monocam.K[0, 0]
    focal_length_y = monocam.K[1, 1]

    # create output path
    output_directory = path_processed / output_folder
    output_directory.mkdir(parents=True, exist_ok=True)

    # load distance values (distance_path is already a Path) and collect
    # the image filenames to rescale
    dataframe = pd.read_csv(distance_path)
    imagenames_list = [
        filename for filename in os.listdir(image_path)
        if filename.endswith((".jpg", ".png", ".tif"))
    ]
    Console.info("Distance values loaded...")
    rescale_images(
        imagenames_list,
        image_path,
        interpolate_method,
        target_pixel_size,
        dataframe,
        output_directory,
        focal_length_x,
        focal_length_y,
        maintain_pixels,
    )
    return True
def draw_laser(
    left_image_name,
    right_image_name,
    left_maps,
    right_maps,
    top_left,
    top_right,
    bottom_left,
    bottom_right,
    remap,
    camera_name,
):
    """Draw identified laser positions on top of laser line images

    The annotated left/right images are written to a
    ``laser_detection_<camera_name>`` folder under the processed tree.
    Nothing is done if both output images already exist.

    Returns
    -------
        None
    """

    # Destination folders/files mirror the processed-folder layout of the inputs
    lsaving_folder = get_processed_folder(left_image_name.parent) / (
        "laser_detection_" + camera_name
    )
    rsaving_folder = get_processed_folder(right_image_name.parent) / (
        "laser_detection_" + camera_name
    )
    lsaving_folder.mkdir(parents=True, exist_ok=True)
    rsaving_folder.mkdir(parents=True, exist_ok=True)
    lfilename = lsaving_folder / left_image_name.name
    rfilename = rsaving_folder / right_image_name.name

    # Skip work only when BOTH annotated images are already on disk
    if lfilename.exists() and rfilename.exists():
        return

    limg = cv2.imread(str(left_image_name), cv2.IMREAD_ANYDEPTH)
    rimg = cv2.imread(str(right_image_name), cv2.IMREAD_ANYDEPTH)

    def _fit_to_maps(img, maps):
        # Pad/resize the raw image when its size differs from the
        # rectification maps so that remapping below is well-defined.
        map_h, map_w = maps[0].shape[0], maps[0].shape[1]
        if img.shape[0] != map_h or img.shape[1] != map_w:
            img = resize_with_padding(img, (map_w, map_h))
        return img

    limg = _fit_to_maps(limg, left_maps)
    rimg = _fit_to_maps(rimg, right_maps)

    channels = limg.shape[-1] if limg.ndim == 3 else 1

    if remap:
        # Rectify first, then paint the rectified intensities into the
        # green channel of a fresh colour canvas.
        lgreen = cv2.remap(limg, left_maps[0], left_maps[1], cv2.INTER_LANCZOS4)
        rgreen = cv2.remap(rimg, right_maps[0], right_maps[1], cv2.INTER_LANCZOS4)
        limg_colour = np.zeros((lgreen.shape[0], lgreen.shape[1], 3), dtype=np.uint8)
        rimg_colour = np.zeros((rgreen.shape[0], rgreen.shape[1], 3), dtype=np.uint8)
        limg_colour[:, :, 1] = lgreen
        rimg_colour[:, :, 1] = rgreen
    elif channels == 1:
        # Greyscale input: place it in the green channel of a colour canvas
        limg_colour = np.zeros((limg.shape[0], limg.shape[1], 3), dtype=np.uint8)
        rimg_colour = np.zeros((rimg.shape[0], rimg.shape[1], 3), dtype=np.uint8)
        limg_colour[:, :, 1] = limg
        rimg_colour[:, :, 1] = rimg
    else:
        # Already a colour image: draw directly on it
        limg_colour = limg
        rimg_colour = rimg

    # Mark each detected laser pixel (points are stored as [row, col],
    # cv2.circle wants (x, y) so the order is swapped).
    for canvas, peaks, colour in (
        (limg_colour, top_left, (0, 0, 255)),
        (rimg_colour, top_right, (255, 0, 0)),
        (limg_colour, bottom_left, (255, 0, 127)),
        (rimg_colour, bottom_right, (0, 255, 127)),
    ):
        for p in peaks:
            cv2.circle(canvas, (int(p[1]), int(p[0])), 1, colour, -1)

    cv2.imwrite(str(lfilename), limg_colour)
    cv2.imwrite(str(rfilename), rimg_colour)
    Console.info("Saved " + str(lfilename) + " and " + str(rfilename))
    # NOTE(review): this def is indented inside draw_laser in the original
    # source, which makes it an unused nested function — looks like an
    # indentation artifact; confirm whether it should be module-level.
    def get_laser_pixels(
        image_name,
        maps,
        min_greenness_ratio,
        k,
        num_columns,
        start_row,
        end_row,
        start_row_b,
        end_row_b,
        two_lasers,
        remap,
        overwrite,
        camera_name,
    ):
        """Get pixel positions of laser line(s) in image

        Results are cached on disk: if the laser detector has already run
        for this image, pixel positions are read back from the cached text
        file. Passing `overwrite=True` (or finding an empty cache file)
        forces the detector to run again.

        Returns
        -------
        np.ndarray
            (n x 2) array of y and x values of top laser line
        np.ndarray
            (m x 2) array of y and x values of bottom laser line (empty array
            if `two_lasers` is `False`)
        """

        cache_dir = get_processed_folder(image_name.parent)
        cache_dir.mkdir(parents=True, exist_ok=True)
        filename = cache_dir / (str(image_name.stem) + "_" + camera_name + ".txt")

        def _run_detector():
            # Run the laser detector and persist its output to `filename`
            return find_laser_write_file(
                filename,
                image_name,
                maps,
                min_greenness_ratio,
                k,
                num_columns,
                start_row,
                end_row,
                start_row_b,
                end_row_b,
                two_lasers,
                remap,
                camera_name,
            )

        if overwrite or not filename.exists():
            return _run_detector()

        with filename.open("r") as f:
            cached = yaml.safe_load(f)

        if cached is None:
            # Empty/unparseable cache file — fall back to detection
            return _run_detector()

        points = np.array(
            [[p[0], p[1]] for p in cached["top"]], dtype=np.float32
        )
        points_b = []
        if two_lasers:
            bottom = cached["bottom"] if "bottom" in cached else []
            points_b = np.array(
                [[p[0], p[1]] for p in bottom], dtype=np.float32
            )
        return points, points_b