def calculate_attenuation_parameters(images, distances, image_height,
                                     image_width, image_channels):
    """Compute attenuation parameters for all images

    Parameters
    -----------
    images : numpy.ndarray
        image memmap reshaped as a vector
    distances : numpy.ndarray
        distance memmap reshaped as a vector
    image_height : int
        height of an image
    image_width : int
        width of an image
    image_channels : int
        number of channels per image

    Returns
    -------
    numpy.ndarray
        attenuation_parameters
    """

    image_attenuation_parameters = np.empty(
        (image_channels, image_height, image_width, 3), dtype=np.float32)

    # Check available RAM and allocate threads accordingly
    mem = psutil.virtual_memory()
    available_bytes = mem.available  # in bytes
    required_bytes = image_channels * image_height * image_width * 4 * len(
        images)
    num_jobs = min(int(available_bytes / required_bytes), 12)

    # Keep one alive!
    cpus = psutil.cpu_count() - 1

    if num_jobs > cpus:
        num_jobs = cpus
    elif num_jobs <= 0:
        num_jobs = 1
        Console.warn("You might have not enough available RAM to continue.")

    if num_jobs < cpus - 1:
        Console.info("Assigning", num_jobs, "jobs to your CPU to save RAM")
    else:
        Console.info("Assigning", num_jobs, "jobs to your CPU")

    for i_channel in range(image_channels):
        with tqdm_joblib(
                tqdm(desc="Curve fitting", total=image_height * image_width)):
            results = joblib.Parallel(n_jobs=num_jobs, verbose=0)([
                joblib.delayed(curve_fitting)(distances[:, i_pixel],
                                              images[:, i_pixel, i_channel])
                for i_pixel in range(image_height * image_width)
            ])

            attenuation_parameters = np.array(results)
            attenuation_parameters = attenuation_parameters.reshape(
                [image_height, image_width, 3])
        image_attenuation_parameters[i_channel] = attenuation_parameters

    return image_attenuation_parameters
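# Hedged usage sketch for calculate_attenuation_parameters, assuming the
# helpers used above (curve_fitting, Console, psutil, joblib, tqdm_joblib) are
# importable. Array shapes follow the indexing in the function body:
# images is (n_images, height*width, channels), distances is
# (n_images, height*width). The synthetic data below are illustrative only;
# real inputs come from the image and distance memmaps.
import numpy as np

n_images, height, width, channels = 50, 4, 6, 3
images = np.random.rand(n_images, height * width, channels).astype(np.float32)
distances = 2.0 + np.random.rand(n_images, height * width).astype(np.float32)

params = calculate_attenuation_parameters(images, distances, height, width,
                                          channels)
# params has shape (channels, height, width, 3): one curve fit per pixel and channel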
def call_rescale(args):
    path = Path(args.path).resolve()

    time_string = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    Console.set_logging_file(
        get_processed_folder(path) /
        ("log/" + time_string + "_correct_images_rescale.log"))

    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix)

    # install freeimage plugins if not installed
    imageio.plugins.freeimage.download()

    if correct_config.camerarescale is None:
        Console.error("Camera rescale configuration not found")
        Console.error(
            "Please populate the correct_images.yaml file with a rescale configuration"
        )
        Console.quit("Malformed correct_images.yaml file")

    # obtain parameters for rescale from correct_config
    rescale_cameras = correct_config.camerarescale.rescale_cameras

    for camera in rescale_cameras:
        corrections.rescale_camera(path, camerasystem, camera)
    Console.info("Rescaling completed for all cameras ...")
def save_cloud(filename, cloud):
    """Write list of 3D points to ply file

    Returns
    -------
        None
    """

    cloud_size = len(cloud)

    header_msg = "ply\n\
                  format ascii 1.0\n\
                  element vertex {0}\n\
                  property float x\n\
                  property float y\n\
                  property float z\n\
                  end_header\n".format(
        cloud_size
    )

    Console.info("Saving cloud to " + str(filename))

    with filename.open("w") as f:
        f.write(header_msg)
        for p in cloud:
            f.write("{0:.5f} {1:.5f} {2:.5f}\n".format(p[0][0], p[1][0], p[2][0]))
def write_csv(
    csv_filepath: Path,
    data_list: Union[List[BodyVelocity], List[Orientation], List[Depth],
                     List[Altitude], List[Usbl], List[Tide], List[Other],
                     List[Camera], List[SyncedOrientationBodyVelocity], ],
    csv_filename: str,
    csv_flag: Optional[bool] = True,
    mutex: Optional[Lock] = None,
):
    if csv_flag is True and len(data_list) > 1:
        csv_file = Path(csv_filepath)

        if not csv_file.exists():
            if mutex is not None:
                mutex.acquire()
            csv_file.mkdir(parents=True, exist_ok=True)
            if mutex is not None:
                mutex.release()

        Console.info("Writing outputs to {}.csv ...".format(csv_filename))
        file = csv_file / "{}.csv".format(csv_filename)
        covariance_file = csv_file / "{}_cov.csv".format(csv_filename)

        fileout = None
        fileout_cov = None
        if len(data_list) > 0:
            fileout = file.open("w")
            # write headers
            str_to_write = data_list[0].get_csv_header()
            fileout.write(str_to_write)
            if hasattr(data_list[0], "covariance") and hasattr(
                    data_list[0], "get_csv_header_cov"):
                if data_list[0].covariance is not None:
                    fileout_cov = covariance_file.open("w")
                    str_to_write_cov = data_list[0].get_csv_header_cov()
                    fileout_cov.write(str_to_write_cov)

            # Loop for each line in csv
            for i in range(len(data_list)):
                try:
                    str_to_write = data_list[i].to_csv_row()
                    if fileout_cov is not None:
                        str_to_write_cov = data_list[i].to_csv_cov_row()
                        fileout_cov.write(str_to_write_cov)
                    fileout.write(str_to_write)
                except IndexError:
                    Console.error(
                        "There is something wrong with camera filenames "
                        "and indexing for the file",
                        csv_filename,
                    )
                    Console.quit("Check write_csv function.")
            fileout.close()
            if fileout_cov is not None:
                fileout_cov.close()
            Console.info("... done writing {}.csv.".format(csv_filename))
        else:
            Console.warn("Empty data list {}".format(str(csv_filename)))
    def print_report(self):
        Console.info("EKF measurements report:")
        for key in self.measurements:
            Console.info(
                "\t",
                key,
                "measurements:",
                self.measurements[key].dropped,
                "/",
                self.measurements[key].total,
                "dropped",
            )
    def test_console(self):
        with patch.object(Console, "get_version", return_value="testing"):
            Console.warn("This is a warning")
            Console.error("This is an error message")
            Console.info("This is an informative message")
            Console.banner()

            Console.get_username()
            Console.get_hostname()
            Console.get_date()
            Console.get_version()
            for i in range(1, 10):
                Console.progress(i, 10)
    def set_loader(self, loader_name):
        if loader_name == "xviii":
            self._loader = xviii.loader
            self._loader_name = "xviii"
        elif loader_name == "default":
            self._loader = default.loader
            self._loader_name = "default"
        elif loader_name == "rosbag":
            self._loader = rosbag.loader
            self._loader_name = "rosbag"
        else:
            Console.quit("The loader type", loader_name, "is not implemented.")
        Console.info("Loader set to", loader_name)
def parse_NOC_polpred(mission, vehicle, category, ftype, outpath):
    # parser meta data
    sensor_string = "autosub"
    category = category
    output_format = ftype

    if category == Category.TIDE:
        filepath = mission.tide.filepath
        timezone = mission.tide.timezone
        timeoffset = mission.tide.timeoffset
        timezone_offset = read_timezone(timezone)

        tide = Tide(mission.tide.std_offset)
        tide.sensor_string = sensor_string

        path = get_raw_folder(outpath / ".." / filepath)

        file_list = get_file_list(path)

        data_list = []

        Console.info("... parsing NOC tide data")
        # Data format sample
        #  Date      Time      Level     Speed    Direc'n
        #                         m        m/s       deg
        # 6/ 9/2019  00:00       0.74      0.14       51
        # 6/ 9/2019  01:00       0.58      0.15       56

        for file in file_list:
            with file.open("r", errors="ignore") as tide_file:
                for line in tide_file.readlines()[6:]:
                    # skip the first six header lines of the file
                    dd = int(line[0:2])
                    mm = int(line[3:5])
                    yyyy = int(line[6:10])
                    hour = int(line[12:14])
                    mins = int(line[15:17])
                    secs = 0  # current models only provide resolution in minutes
                    msec = 0
                    epoch_time = date_time_to_epoch(yyyy, mm, dd, hour, mins,
                                                    secs, timezone_offset)
                    epoch_timestamp = epoch_time + msec / 1000 + timeoffset
                    tide.epoch_timestamp = epoch_timestamp
                    tide.height = float(line[22:28])
                    tide.height_std = tide.height * tide.height_std_factor

                    data = tide.export(output_format)
                    data_list.append(data)
        return data_list
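# Illustrative check of the fixed-width slicing above, assuming the column
# alignment implied by the data-format comment (real NOC POLPRED files may
# differ slightly):
sample = " 6/ 9/2019  00:00       0.74      0.14       51"
assert int(sample[0:2]) == 6 and int(sample[3:5]) == 9
assert int(sample[6:10]) == 2019
assert int(sample[12:14]) == 0 and int(sample[15:17]) == 0
assert float(sample[22:28]) == 0.74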
def determine_extension_and_images_per_folder(folder_path, image_list, label):
    """Determine filename extension and number of images per subfolder

    The number of images per subfolder determines how the subfolders are
    named. The subfolder name is 3 letters long and either starts from the
    first (index = 0) or the second (index = 1) digit of the image number.

    :param folder_path: Path where images are stored
    :type  folder_path: pathlib.Path
    :param image_list:  list of 7-digit zero-padded image numbers
    :type  image_list:  list of str
    :param label:       Camera label
    :type  label:       str

    :returns:
        -index_start_of_folder_name (`int`) - Index where subfolder name starts
        -extension (`str`) - Filename extension of images ("jpg" or "tif")
    """
    Console.info("    Determine filename extension and images per subfolder "
                 "of camera {}...".format(label))

    if build_image_path(folder_path, image_list[-1], 0, "jpg").is_file():
        index_start_of_folder_name = 0
        extension = "jpg"
        Console.info('    ...Filename extension: "{}", 10000 images '
                     "per subfolder.".format(extension))
    elif build_image_path(folder_path, image_list[-1], 1, "jpg").is_file():
        index_start_of_folder_name = 1
        extension = "jpg"
        Console.info('    ...Filename extension: "{}", 1000 images '
                     "per subfolder.".format(extension))
    elif build_image_path(folder_path, image_list[-1], 0, "tif").is_file():
        index_start_of_folder_name = 0
        extension = "tif"
        Console.info('    ...Filename extension: "{}", 10000 images '
                     "per subfolder.".format(extension))
    elif build_image_path(folder_path, image_list[-1], 1, "tif").is_file():
        index_start_of_folder_name = 1
        extension = "tif"
        Console.info('    ...Filename extension: "{}", 1000 images '
                     "per subfolder.".format(extension))
    else:
        index_start_of_folder_name = 0
        extension = "jpg"
        Console.warn("    ...Did not find images from camera {} in {}. "
                     'Default to using extension "{}" and 10000 images per '
                     "subfolder.".format(label, folder_path, extension))

    return index_start_of_folder_name, extension
    def parse(self):
        # parse phins data
        Console.info("... parsing phins standard data")

        data_list = []
        path = get_raw_folder(self.outpath / ".." / self.filepath /
                              self.filename)
        with path.open("r", encoding="utf-8", errors="ignore") as filein:
            for complete_line in filein.readlines():
                line_and_md5 = complete_line.strip().split("*")
                line = line_and_md5[0].strip().split(",")
                if not self.line_is_valid(line, line_and_md5):
                    continue
                header = line[1]
                data = self.process_line(header, line)
                if data is not None:
                    data_list.append(data)
            return data_list
def load_memmap_from_numpyfilelist(filepath, numpyfilelist: list):
    """Generate memmaps from numpy arrays

    Parameters
    -----------
    filepath : Path
        path to output memmap folder
    numpyfilelist : list
        list of paths to numpy files

    Returns
    --------
    Path, numpy.ndarray
        memmap_path and memmap_handle
    """

    image = np.load(str(numpyfilelist[0]))
    list_shape = [len(numpyfilelist)]
    list_shape = list_shape + list(image.shape)

    filename_map = "memmap_" + str(uuid.uuid4()) + ".map"
    memmap_path = Path(filepath) / filename_map

    memmap_handle = np.memmap(
        filename=memmap_path,
        mode="w+",
        shape=tuple(list_shape),
        dtype=np.float32,
    )
    Console.info("Loading memmaps from numpy files...")

    def memmap_loader(numpyfilelist, memmap_handle, idx):
        memmap_handle[idx, ...] = np.load(numpyfilelist[idx])

    with tqdm_joblib(
            tqdm(desc="numpy images to memmap", total=len(numpyfilelist))):
        joblib.Parallel(n_jobs=-2, verbose=0)(
            joblib.delayed(memmap_loader)(numpyfilelist, memmap_handle, idx)
            for idx in range(len(numpyfilelist)))

    return memmap_path, memmap_handle
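# Hedged usage sketch for load_memmap_from_numpyfilelist: the temporary
# directory and .npy files below are created only for illustration.
import tempfile
from pathlib import Path
import numpy as np

tmpdir = Path(tempfile.mkdtemp())
npy_files = []
for k in range(3):
    f = tmpdir / "img_{}.npy".format(k)
    np.save(str(f), np.random.rand(4, 5).astype(np.float32))
    npy_files.append(f)

memmap_path, memmap_handle = load_memmap_from_numpyfilelist(tmpdir, npy_files)
# memmap_handle has shape (3, 4, 5); remove memmap_path when no longer needed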
def write_sidescan_csv(csv_filepath, data_list, csv_filename, csv_flag):
    if csv_flag:
        csv_file = Path(csv_filepath)
        if not csv_file.exists():
            csv_file.mkdir(parents=True, exist_ok=True)
        Console.info("Writing SSS outputs to {}.txt ...".format(csv_filename))
        file = csv_file / "{}.txt".format(csv_filename)
        if len(data_list) > 0:
            str_to_write = data_list[0].get_sidescan_header()
            with file.open("w") as fileout:
                fileout.write(str_to_write)
                for i in range(len(data_list)):
                    try:
                        str_to_write = data_list[i].to_sidescan_row()
                        fileout.write(str_to_write)
                    except IndexError:
                        break
            Console.info(
                "... done writing SSS outputs to {}.txt.".format(csv_filename))
        else:
            Console.warn("Empty data list {}".format(str(csv_filename)))
    def get_altitude_and_depth_maps(self):
        """Generate distance matrix numpy files and save them"""
        # read altitude / depth map depending on distance_metric
        if self.distance_metric == "uniform":
            Console.info("Null distance matrix created")
            self.depth_map_list = []
            return
        elif self.distance_metric == "altitude":
            Console.info("Null distance matrix created")
            self.depth_map_list = []
            return
        elif self.distance_metric == "depth_map":
            path_depth = self.path_processed / "depth_map"
            if not path_depth.exists():
                Console.quit("Depth maps not found...")
            else:
                Console.info("Path to depth maps found...")
                depth_map_list = list(path_depth.glob("*.npy"))
                self.depth_map_list = [
                    Path(item)
                    for item in depth_map_list
                    for image_path in self.camera_image_list
                    if Path(image_path).stem in Path(item).stem
                ]

                if len(self.camera_image_list) != len(self.depth_map_list):
                    Console.quit(
                        "The number of images does not coincide with the ",
                        "number of depth maps.",
                    )
def plane_fitting_ransac(
    cloud_xyz,
    min_distance_threshold,
    sample_size,
    goal_inliers,
    max_iterations,
    plot=False,
):
    model, inliers, iterations = run_ransac(
        cloud_xyz,
        fit_plane,
        lambda x, y: is_inlier(x, y, min_distance_threshold),
        sample_size,
        goal_inliers,
        max_iterations,
    )
    a, b, c, d = model
    if plot:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D

        fig = plt.figure()
        ax = Axes3D(fig)
        # min_x, max_x, min_y, max_y = bounding_box(cloud_xyz[:, 0:2])
        xx, yy, zz = plot_plane(a, b, c, d)  # , min_x, max_x, min_y, max_y)
        ax.plot_surface(xx, yy, zz, color=(0, 1, 0, 0.5))
        ax.scatter3D(cloud_xyz.T[0], cloud_xyz.T[1], cloud_xyz.T[2], s=1)
        ax.set_xlabel("$X$")
        ax.set_ylabel("$Y$")
        ax.set_zlabel("$Z$")
        plt.show()
    Console.info(
        "RANSAC took",
        iterations + 1,
        " iterations. Best model:",
        model,
        "explains:",
        len(inliers),
    )
    return (a, b, c, d), inliers
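# Hedged usage sketch for plane_fitting_ransac on synthetic data; assumes
# run_ransac, fit_plane and is_inlier from this module are available.
import numpy as np

rng = np.random.default_rng(0)
xy = rng.uniform(-1.0, 1.0, size=(500, 2))
z = 0.1 * xy[:, 0] - 0.2 * xy[:, 1] + 2.0 + rng.normal(0.0, 0.01, 500)
cloud_xyz = np.column_stack((xy, z))

(a, b, c, d), inliers = plane_fitting_ransac(
    cloud_xyz,
    min_distance_threshold=0.05,
    sample_size=3,
    goal_inliers=int(0.8 * len(cloud_xyz)),
    max_iterations=100,
)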
    def __init__(self, filepath, force_overwite=False, overwrite_all=False):
        filepath = Path(filepath).resolve()
        self.filepath = get_raw_folder(filepath)
        self.fo = force_overwite
        self.foa = overwrite_all

        if self.foa:
            self.fo = True

        self.configuration_path = (get_config_folder(self.filepath.parent) /
                                   "calibration")
        calibration_config_file = self.configuration_path / "calibration.yaml"
        if calibration_config_file.exists():
            Console.info("Loading existing calibration.yaml at {}".format(
                calibration_config_file))
        else:
            root = Path(__file__).parents[1]
            default_file = root / "auv_cal/default_yaml" / "default_calibration.yaml"
            Console.warn("Cannot find {}, generating default from {}".format(
                calibration_config_file, default_file))
            # save localisation yaml to processed directory
            default_file.copy(calibration_config_file)

            Console.warn("Edit the file at: \n\t" +
                         str(calibration_config_file))
            Console.warn(
                "Try to use relative paths to the calibration datasets")
            Console.quit(
                "Modify the file calibration.yaml and run this code again.")

        with calibration_config_file.open("r") as stream:
            self.calibration_config = yaml.safe_load(stream)

        # Create the calibration folder at the same level as the dives
        self.output_path = get_processed_folder(
            self.filepath.parent) / "calibration"
        if not self.output_path.exists():
            self.output_path.mkdir(parents=True)
        if not self.configuration_path.exists():
            self.configuration_path.mkdir(parents=True)
def call_process(args):
    """Perform processing on source images using correction parameters
    generated in parse and outputs corrected images

    Parameters
    -----------
    args : parse_args object
        User provided arguments for path of source images
    """

    path = Path(args.path).resolve()

    time_string = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    Console.set_logging_file(
        get_processed_folder(path) /
        ("log/" + time_string + "_correct_images_process.log"))

    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix)

    for camera in camerasystem.cameras:
        Console.info("Processing for camera", camera.name)

        if len(camera.image_list) == 0:
            Console.info(
                "No images found for the camera at the path provided...")
            continue
        else:
            corrector = Corrector(args.force, args.suffix, camera,
                                  correct_config, path)
            if corrector.camera_found:
                corrector.process()
    Console.info("Process completed for all cameras...")
def rescale_images(
    imagenames_list,
    image_directory,
    interpolate_method,
    target_pixel_size_m,
    dataframe,
    output_directory,
    f_x,
    f_y,
    maintain_pixels,
):
    Console.info("Rescaling images...")

    for idx in trange(len(imagenames_list)):
        image_name = imagenames_list[idx]
        source_image_path = Path(image_directory) / image_name
        output_image_path = Path(output_directory) / image_name
        image_path_list = dataframe["relative_path"]
        trimmed_path_list = [
            path for path in image_path_list if Path(path).stem in image_name
        ]
        trimmed_dataframe = dataframe.loc[dataframe["relative_path"].isin(
            trimmed_path_list)]
        altitude = trimmed_dataframe["altitude [m]"]
        if len(altitude) > 0:
            image = imageio.imread(source_image_path).astype("uint8")
            rescaled_image = rescale(
                image,
                interpolate_method,
                target_pixel_size_m,
                altitude,
                f_x,
                f_y,
                maintain_pixels,
            )
            imageio.imwrite(output_image_path, rescaled_image, format="PNG-FI")
        else:
            Console.warn("Did not get distance values for image: " +
                         image_name)
    def mono(self):
        for c in self.calibration_config["cameras"]:
            cam_name = c["name"]
            # Find if the calibration file exists
            calibration_file = self.output_path / str("mono_" + cam_name +
                                                      ".yaml")
            Console.info("Looking for a calibration file at " +
                         str(calibration_file))
            if calibration_file.exists() and not self.fo:
                Console.warn(
                    "The camera " + c["name"] +
                    " has already been calibrated. If you want to overwrite " +
                    "the JSON, use the -F flag.")
            else:
                Console.info(
                    "The camera is not calibrated, running mono calibration..."
                )
                filepaths = build_filepath(
                    get_processed_folder(self.filepath),
                    c["camera_calibration"]["path"],
                )

                if "glob_pattern" not in c["camera_calibration"]:
                    Console.error(
                        "Could not find the key glob_pattern for the camera ",
                        c["name"],
                    )
                    Console.quit("glob_pattern expected in calibration.yaml")

                calibrate_mono(
                    cam_name,
                    filepaths,
                    str(c["camera_calibration"]["glob_pattern"]),
                    self.calibration_config["camera_calibration"],
                    calibration_file,
                    self.fo,
                    self.foa,
                )
def calibrate_mono(
    name,
    filepaths,
    extension,
    config,
    output_file,
    fo,
    foa,
    target_width=None,
    target_height=None,
):
    if not check_dirs_exist(filepaths):
        filepaths = get_raw_folders(filepaths)
    Console.info("Looking for {} calibration images in {}".format(
        extension, str(filepaths)))
    image_list = collect_image_files(filepaths, extension)
    Console.info("Found " + str(len(image_list)) + " images.")
    if len(image_list) < 8:
        Console.quit("Too few images. Try to get more.")

    mc = MonoCalibrator(
        boards=[
            ChessboardInfo(config["rows"], config["cols"], config["size"])
        ],
        pattern=check_pattern(config),
        invert=config["invert"],
        name=name,
        target_width=target_width,
        target_height=target_height,
    )
    try:
        image_list_file = output_file.with_suffix(".json")
        if foa or not image_list_file.exists():
            mc.cal(image_list)
            with image_list_file.open("w") as f:
                Console.info("Writing JSON to "
                             "'{}'"
                             "".format(image_list_file))
                json.dump(mc.json, f)
        else:
            mc.cal_from_json(image_list_file, image_list)
        mc.report()
        Console.info("Writing calibration to " "'{}'" "".format(output_file))
        with output_file.open("w") as f:
            f.write(mc.yaml())
    except CalibrationException:
        Console.error("The calibration pattern was not found in the images.")
    def parse(self, path_list, correct_config_list):
        # path_list and correct_config_list are assumed to be valid and
        # paired element-wise (one configuration per dive)
        for i in range(len(path_list)):  # for each dive
            path = path_list[i]
            correct_config = correct_config_list[i]

            Console.info("Parsing dive:", path)
            # Console.info("Setting path...")
            self.set_path(path)  # update the dive path

            # Console.info("Loading configuration...")
            self.load_configuration(correct_config)  # load the dive config
            # Set the user specified list - if any
            self.user_specified_image_list = self.user_specified_image_list_parse
            # Update list of images by appending user-defined list
            # TODO: this list must be populated AFTER loading the
            # configuration and BEFORE getting the image list
            self.get_imagelist()

        # Show the total number of images after filtering and merging the
        # dives. It should match the sum of the filtered images of each dive.
        if len(path_list) > 1:
            Console.info(
                "Total number of images after merging dives:",
                len(self.camera_image_list),
            )

        Console.info("Output directories created / existing...")

        if self.correction_method == "colour_correction":
            self.get_altitude_and_depth_maps()
            self.generate_attenuation_correction_parameters()

            for i in range(len(path_list)):  # for each dive
                path = get_processed_folder(path_list[i])
                attn_dir = Path(self.attenuation_parameters_folder)
                relative_folder = attn_dir.relative_to(self.path_processed)
                dest_dir = path / relative_folder
                if dest_dir == attn_dir:
                    # Do not copy over the original files
                    continue
                copy_file_if_exists(self.attenuation_params_filepath, dest_dir)
                copy_file_if_exists(self.correction_gains_filepath, dest_dir)
                copy_file_if_exists(self.corrected_mean_filepath, dest_dir)
                copy_file_if_exists(self.corrected_std_filepath, dest_dir)
                copy_file_if_exists(self.raw_mean_filepath, dest_dir)
                copy_file_if_exists(self.raw_std_filepath, dest_dir)
        elif self.correction_method == "manual_balance":
            Console.info("run process for manual_balance...")
    def process_correction(self):
        """Execute series of corrections for a set of input images"""

        # check for calibration file if distortion correction needed
        if self.undistort:
            camera_params_folder = Path(self.path_processed).parents[0] / "calibration"
            camera_params_filename = "mono" + self.camera_name + ".yaml"
            camera_params_file_path = camera_params_folder / camera_params_filename

            if not camera_params_file_path.exists():
                Console.info("Calibration file not found...")
                self.undistort = False
            else:
                Console.info("Calibration file found...")
                self.camera_params_file_path = camera_params_file_path

        Console.info(
            "Processing",
            len(self.camera_image_list),
            "images for color, distortion, gamma corrections...",
        )

        with tqdm_joblib(
            tqdm(desc="Correcting images", total=len(self.camera_image_list))
        ):
            self.processed_image_list = joblib.Parallel(n_jobs=-2, verbose=0)(
                joblib.delayed(self.process_image)(idx)
                for idx in range(0, len(self.camera_image_list))
            )

        # write a filelist.csv containing image filenames which are processed
        image_files = []
        for path in self.processed_image_list:
            if path is not None:
                image_files.append(Path(path).name)
        dataframe = pd.DataFrame(image_files)
        filelist_path = self.output_images_folder / "filelist.csv"
        dataframe.to_csv(filelist_path)
        Console.info("Processing of images is completed...")
def interpolate_sensor_list(
    sensor_list,
    sensor_name,
    sensor_offsets,
    origin_offsets,
    latlon_reference,
    _centre_list,
):
    j = 0
    [origin_x_offset, origin_y_offset, origin_z_offset] = origin_offsets
    [latitude_reference, longitude_reference] = latlon_reference
    # Check if camera activates before dvl and orientation sensors.
    start_time = _centre_list[0].epoch_timestamp
    end_time = _centre_list[-1].epoch_timestamp
    if (sensor_list[0].epoch_timestamp > end_time
            or sensor_list[-1].epoch_timestamp < start_time):
        Console.warn(
            "{} timestamps does not overlap with dead reckoning data, "
            "check timestamp_history.pdf via -v option.".format(sensor_name))
    else:
        sensor_overlap_flag = 0
        for i in range(len(sensor_list)):
            if sensor_list[i].epoch_timestamp < start_time:
                sensor_overlap_flag = 1
                pass
            else:
                if i > 0:
                    Console.warn(
                        "Deleted",
                        i,
                        "entries from sensor",
                        sensor_name,
                        ". Reason: data before start of mission",
                    )
                    del sensor_list[:i]
                break
        for i in range(len(sensor_list)):
            if j >= len(_centre_list) - 1:
                ii = len(sensor_list) - i
                if ii > 0:
                    Console.warn(
                        "Deleted",
                        ii,
                        "entries from sensor",
                        sensor_name,
                        ". Reason: data after end of mission",
                    )
                    del sensor_list[i:]
                sensor_overlap_flag = 1
                break
            while _centre_list[j].epoch_timestamp < sensor_list[
                    i].epoch_timestamp:
                if (j + 1 > len(_centre_list) - 1
                        or _centre_list[j + 1].epoch_timestamp >
                        sensor_list[-1].epoch_timestamp):
                    break
                j += 1
            # if j>=1: ?

            sensor_list[i].roll = interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].roll,
                _centre_list[j].roll,
            )
            sensor_list[i].pitch = interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].pitch,
                _centre_list[j].pitch,
            )
            if abs(_centre_list[j].yaw - _centre_list[j - 1].yaw) > 180:
                if _centre_list[j].yaw > _centre_list[j - 1].yaw:
                    sensor_list[i].yaw = interpolate(
                        sensor_list[i].epoch_timestamp,
                        _centre_list[j - 1].epoch_timestamp,
                        _centre_list[j].epoch_timestamp,
                        _centre_list[j - 1].yaw,
                        _centre_list[j].yaw - 360,
                    )
                else:
                    sensor_list[i].yaw = interpolate(
                        sensor_list[i].epoch_timestamp,
                        _centre_list[j - 1].epoch_timestamp,
                        _centre_list[j].epoch_timestamp,
                        _centre_list[j - 1].yaw - 360,
                        _centre_list[j].yaw,
                    )
                if sensor_list[i].yaw < 0:
                    sensor_list[i].yaw += 360
                elif sensor_list[i].yaw > 360:
                    sensor_list[i].yaw -= 360
            else:
                sensor_list[i].yaw = interpolate(
                    sensor_list[i].epoch_timestamp,
                    _centre_list[j - 1].epoch_timestamp,
                    _centre_list[j].epoch_timestamp,
                    _centre_list[j - 1].yaw,
                    _centre_list[j].yaw,
                )
            sensor_list[i].x_velocity = interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].x_velocity,
                _centre_list[j].x_velocity,
            )
            sensor_list[i].y_velocity = interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].y_velocity,
                _centre_list[j].y_velocity,
            )
            sensor_list[i].z_velocity = interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].z_velocity,
                _centre_list[j].z_velocity,
            )
            sensor_list[i].altitude = interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].altitude,
                _centre_list[j].altitude,
            )

            [x_offset, y_offset, z_offset] = body_to_inertial(
                sensor_list[i].roll,
                sensor_list[i].pitch,
                sensor_list[i].yaw,
                origin_x_offset - sensor_offsets[0],
                origin_y_offset - sensor_offsets[1],
                origin_z_offset - sensor_offsets[2],
            )

            sensor_list[i].northings = (interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].northings,
                _centre_list[j].northings,
            ) - x_offset)
            sensor_list[i].eastings = (interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].eastings,
                _centre_list[j].eastings,
            ) - y_offset)
            sensor_list[i].altitude = (interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].altitude,
                _centre_list[j].altitude,
            ) - z_offset)
            sensor_list[i].depth = (interpolate(
                sensor_list[i].epoch_timestamp,
                _centre_list[j - 1].epoch_timestamp,
                _centre_list[j].epoch_timestamp,
                _centre_list[j - 1].depth,
                _centre_list[j].depth,
            ) - z_offset)
            [sensor_list[i].latitude,
             sensor_list[i].longitude] = metres_to_latlon(
                 latitude_reference,
                 longitude_reference,
                 sensor_list[i].eastings,
                 sensor_list[i].northings,
             )

            sensor_list[i].northings_std = interpolate_property(
                _centre_list, i, sensor_list, j, "northings_std")
            sensor_list[i].eastings_std = interpolate_property(
                _centre_list, i, sensor_list, j, "eastings_std")
            sensor_list[i].depth_std = interpolate_property(
                _centre_list, i, sensor_list, j, "depth_std")
            sensor_list[i].roll_std = interpolate_property(
                _centre_list, i, sensor_list, j, "roll_std")
            sensor_list[i].pitch_std = interpolate_property(
                _centre_list, i, sensor_list, j, "pitch_std")
            sensor_list[i].yaw_std = interpolate_property(
                _centre_list, i, sensor_list, j, "yaw_std")
            sensor_list[i].x_velocity_std = interpolate_property(
                _centre_list, i, sensor_list, j, "x_velocity_std")
            sensor_list[i].y_velocity_std = interpolate_property(
                _centre_list, i, sensor_list, j, "y_velocity_std")
            sensor_list[i].z_velocity_std = interpolate_property(
                _centre_list, i, sensor_list, j, "z_velocity_std")
            sensor_list[i].vroll_std = interpolate_property(
                _centre_list, i, sensor_list, j, "vroll_std")
            sensor_list[i].vpitch_std = interpolate_property(
                _centre_list, i, sensor_list, j, "vpitch_std")
            sensor_list[i].vyaw_std = interpolate_property(
                _centre_list, i, sensor_list, j, "vyaw_std")

            if _centre_list[j].covariance is not None:
                sensor_list[i].covariance = interpolate_covariance(
                    sensor_list[i].epoch_timestamp,
                    _centre_list[j - 1].epoch_timestamp,
                    _centre_list[j].epoch_timestamp,
                    _centre_list[j - 1].covariance,
                    _centre_list[j].covariance,
                )

        if sensor_overlap_flag == 1:
            Console.warn(
                "Sensor data from {} spans further than dead reckoning data."
                " Data outside DR is ignored.".format(sensor_name))
        Console.info("Complete interpolation and coordinate transfomations "
                     "for {}".format(sensor_name))
def parse_acfr_images(mission, vehicle, category, ftype, outpath):
    # parser meta data
    class_string = "measurement"
    frame_string = "body"
    category = "image"
    sensor_string = "acfr_standard"

    timezone = mission.image.timezone
    timeoffset = mission.image.timeoffset
    filepath = mission.image.cameras[0].path
    camera1_label = mission.image.cameras[0].name
    camera2_label = mission.image.cameras[1].name

    # read in timezone
    if isinstance(timezone, str):
        if timezone == "utc" or timezone == "UTC":
            timezone_offset = 0
        elif timezone == "jst" or timezone == "JST":
            timezone_offset = 9
    else:
        try:
            timezone_offset = float(timezone)
        except ValueError:
            print(
                "Error: timezone",
                timezone,
                "in mission.cfg not recognised, please enter value from UTC",
                "in hours",
            )
            return

    # convert to seconds from utc
    # timeoffset = -timezone_offset*60*60 + timeoffset

    Console.info("... parsing " + sensor_string + " images")

    # determine file paths

    filepath = get_raw_folder(outpath / ".." / filepath)
    all_list = os.listdir(str(filepath))

    camera1_filename = [
        line for line in all_list
        if camera1_label in line and ".txt" not in line and "._" not in line
    ]
    camera2_filename = [
        line for line in all_list
        if camera2_label in line and ".txt" not in line and "._" not in line
    ]

    data_list = []
    if ftype == "acfr":
        data_list = ""

    # Timestamp lists used for the stereo pairing below; tolerance is the
    # maximum allowed timestamp difference (in seconds) for a stereo pair
    # and is an assumed value here
    epoch_timestamp_camera1 = []
    epoch_timestamp_camera2 = []
    tolerance = 0.05

    for i in range(len(camera1_filename)):
        epoch_timestamp = acfr_timestamp_from_filename(camera1_filename[i],
                                                       timezone_offset,
                                                       timeoffset)
        epoch_timestamp_camera1.append(str(epoch_timestamp))

    for i in range(len(camera2_filename)):
        epoch_timestamp = acfr_timestamp_from_filename(camera2_filename[i],
                                                       timezone_offset,
                                                       timeoffset)
        epoch_timestamp_camera2.append(str(epoch_timestamp))

    for i in range(len(camera1_filename)):
        # print(epoch_timestamp_camera1[i])
        values = []
        for j in range(len(camera2_filename)):
            # print(epoch_timestamp_camera2[j])
            values.append(
                abs(
                    float(epoch_timestamp_camera1[i]) -
                    float(epoch_timestamp_camera2[j])))
        (sync_difference, sync_pair) = min(
            (v, k) for k, v in enumerate(values))
        if sync_difference > tolerance:
            # Skip the pair
            continue
        if ftype == "oplab":
            data = {
                "epoch_timestamp":
                float(epoch_timestamp_camera1[i]),
                "class":
                class_string,
                "sensor":
                sensor_string,
                "frame":
                frame_string,
                "category":
                category,
                "camera1": [{
                    "epoch_timestamp":
                    float(epoch_timestamp_camera1[i]),
                    "filename":
                    str(filepath) + "/" + str(camera1_filename[i]),
                }],
                "camera2": [{
                    "epoch_timestamp":
                    float(epoch_timestamp_camera2[sync_pair]),
                    "filename":
                    str(filepath) + "/" + str(camera2_filename[sync_pair]),
                }],
            }
            data_list.append(data)
        if ftype == "acfr":
            data = ("VIS: " + str(float(epoch_timestamp_camera1[i])) + " [" +
                    str(float(epoch_timestamp_camera1[i])) + "] " +
                    str(camera1_filename[i]) + " exp: 0\n")
            # fileout.write(data)
            data_list += data
            data = ("VIS: " + str(float(epoch_timestamp_camera2[sync_pair])) +
                    " [" + str(float(epoch_timestamp_camera2[sync_pair])) +
                    "] " + str(camera2_filename[sync_pair]) + " exp: 0\n")
            # fileout.write(data)
            data_list += data

    return data_list
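# Small illustration of the stereo pairing logic above: each camera-1
# timestamp is matched to the camera-2 timestamp with the smallest absolute
# difference, and the pair is dropped if that difference exceeds tolerance.
cam1_ts = [100.00, 101.00]
cam2_ts = [100.02, 100.98, 103.50]
for t1 in cam1_ts:
    diff, k = min((abs(t1 - t2), k) for k, t2 in enumerate(cam2_ts))
    print(t1, "pairs with", cam2_ts[k], "difference", round(diff, 2))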
def parse_autosub(mission, vehicle, category, ftype, outpath):
    # parser meta data
    sensor_string = "autosub"
    category = category
    output_format = ftype
    filename = mission.velocity.filename
    filepath = mission.velocity.filepath

    # autosub std models
    depth_std_factor = mission.depth.std_factor
    velocity_std_factor = mission.velocity.std_factor
    velocity_std_offset = mission.velocity.std_offset
    orientation_std_offset = mission.orientation.std_offset
    altitude_std_factor = mission.altitude.std_factor
    headingoffset = vehicle.dvl.yaw

    body_velocity = BodyVelocity(velocity_std_factor, velocity_std_offset,
                                 headingoffset)
    orientation = Orientation(headingoffset, orientation_std_offset)
    depth = Depth(depth_std_factor)
    altitude = Altitude(altitude_std_factor)

    body_velocity.sensor_string = sensor_string
    orientation.sensor_string = sensor_string
    depth.sensor_string = sensor_string
    altitude.sensor_string = sensor_string

    path = get_raw_folder(outpath / ".." / filepath / filename)

    autosub_log = loadmat(str(path))
    autosub_adcp = autosub_log["missionData"]["ADCPbin00"]
    autosub_ins = autosub_log["missionData"]["INSAttitude"]
    autosub_dep_ctl = autosub_log["missionData"]["DepCtlNode"]
    autosub_adcp_log1 = autosub_log["missionData"]["ADCPLog_1"]

    data_list = []
    if category == Category.VELOCITY:
        Console.info("... parsing autosub velocity")
        previous_timestamp = 0
        for i in range(len(autosub_adcp["eTime"])):
            body_velocity.from_autosub(autosub_adcp, i)
            data = body_velocity.export(output_format)
            if body_velocity.epoch_timestamp > previous_timestamp:
                data_list.append(data)
            else:
                data_list[-1] = data
            previous_timestamp = body_velocity.epoch_timestamp
    if category == Category.ORIENTATION:
        Console.info("... parsing autosub orientation")
        previous_timestamp = 0
        for i in range(len(autosub_ins["eTime"])):
            orientation.from_autosub(autosub_ins, i)
            data = orientation.export(output_format)
            if orientation.epoch_timestamp > previous_timestamp:
                data_list.append(data)
            else:
                data_list[-1] = data
            previous_timestamp = orientation.epoch_timestamp
    if category == Category.DEPTH:
        Console.info("... parsing autosub depth")
        previous_timestamp = 0
        for i in range(len(autosub_dep_ctl["eTime"])):
            depth.from_autosub(autosub_dep_ctl, i)
            data = depth.export(output_format)
            if depth.epoch_timestamp > previous_timestamp:
                data_list.append(data)
            else:
                data_list[-1] = data
            previous_timestamp = depth.epoch_timestamp
    if category == Category.ALTITUDE:
        Console.info("... parsing autosub altitude")
        previous_timestamp = 0
        for i in range(len(autosub_adcp_log1["eTime"])):
            altitude.from_autosub(autosub_adcp_log1, i)
            data = altitude.export(output_format)
            if altitude.epoch_timestamp > previous_timestamp:
                data_list.append(data)
            else:
                data_list[-1] = data
            previous_timestamp = altitude.epoch_timestamp
    return data_list
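# Illustration of the timestamp de-duplication pattern used above: a sample
# whose timestamp does not advance replaces the previous entry instead of
# being appended.
samples = [(1.0, "a"), (2.0, "b"), (2.0, "c"), (3.0, "d")]
out = []
previous_timestamp = 0
for t, value in samples:
    if t > previous_timestamp:
        out.append(value)
    else:
        out[-1] = value
    previous_timestamp = t
assert out == ["a", "c", "d"]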
def parse(filepath, force_overwrite, merge):
    # filepath is a list of dive paths; parse each dive individually
    for p in filepath:
        parse_single(p, force_overwrite)

    if merge and len(filepath) > 1:
        Console.info("Merging the dives...")

        # Generate a merged output
        dates = []
        # Collect json files
        json_files = []
        for p in filepath:
            folder_name = Path(p).name
            yyyy = int(folder_name[0:4])
            mm = int(folder_name[4:6])
            dd = int(folder_name[6:8])
            hh = int(folder_name[9:11])
            mm1 = int(folder_name[11:13])
            ss = int(folder_name[13:15])
            d = datetime(yyyy, mm, dd, hh, mm1, ss)
            dates.append(d)

            outpath = get_processed_folder(p)
            nav_file = outpath / "nav/nav_standard.json"
            json_files.append(nav_file)

        data_list = merge_json_files(json_files)

        # Get first and last dates
        dates.sort()
        foldername = (dates[0].strftime("%Y%m%d%H%M%S") + "_" +
                      dates[-1].strftime("%Y%m%d%H%M%S") + "_merged")
        # Create folder if it does not exist
        processed_path = get_processed_folder(filepath[0]).parent
        nav_folder = processed_path / foldername / "nav"
        # make output path if not exist
        if not nav_folder.exists():
            try:
                nav_folder.mkdir(parents=True, exist_ok=True)
            except Exception as e:
                print("Warning:", e)

        # create file (overwrite if exists)
        filename = "nav_standard.json"
        nav_file = nav_folder / filename
        Console.info("Writing the ouput to:", str(nav_file))
        with nav_file.open("w") as fileout:
            json.dump(data_list, fileout, indent=2)
        fileout.close()

        # copy mission.yaml and vehicle.yaml to processed folder for
        # process step
        mission_processed = get_processed_folder(filepath[0]) / "mission.yaml"
        vehicle_processed = get_processed_folder(filepath[0]) / "vehicle.yaml"
        mission_merged = processed_path / foldername / "mission.yaml"
        vehicle_merged = processed_path / foldername / "vehicle.yaml"
        mission_processed.copy(mission_merged)
        vehicle_processed.copy(vehicle_merged)

        # interlace the data based on timestamps
        Console.info("Interlacing merged data...")
        parse_interlacer(nav_folder, filename)
        Console.info(
            "...done interlacing merged data. Output saved to",
            nav_folder / filename,
        )
        plot_parse_data(nav_folder)
        Console.info("Complete merging data")
def parse_single(filepath, force_overwrite):
    # initiate data and processing flags
    filepath = Path(filepath).resolve()
    filepath = get_raw_folder(filepath)

    if not force_overwrite:
        existing_files = check_output_files_exist(
            get_processed_folder(filepath))
        if existing_files:
            msg = ("It looks like this dataset has already been parsed.\n" +
                   "The following file(s) already exist:\n" + existing_files +
                   "If you would like auv_nav to overwrite existing file," +
                   " rerun it with the flag -F.\n" +
                   "Example:   auv_nav parse -F PATH")
            Console.warn(msg)
            Console.warn("Dataset skipped")
            return

    ftype = "oplab"

    # load mission.yaml config file

    mission_file = filepath / "mission.yaml"
    vehicle_file = filepath / "vehicle.yaml"
    mission_file = get_raw_folder(mission_file)
    vehicle_file = get_raw_folder(vehicle_file)
    Console.info("Loading mission.yaml at", mission_file)
    mission = Mission(mission_file)

    Console.info("Loading vehicle.yaml at", vehicle_file)
    vehicle = Vehicle(vehicle_file)

    # copy mission.yaml and vehicle.yaml to processed folder for process step
    mission_processed = get_processed_folder(mission_file)
    vehicle_processed = get_processed_folder(vehicle_file)

    # Write mission with metadata (username, date and hostname)
    mission.write(mission_processed)
    # mission_file.copy(mission_processed)
    vehicle.write(vehicle_processed)

    # check for recognised formats and create nav file
    outpath = get_processed_folder(filepath)
    outpath = outpath / "nav"
    filename = "nav_standard.json"

    # make file path if not exist
    if not outpath.is_dir():
        try:
            outpath.mkdir()
        except Exception as e:
            print("Warning:", e)

    Console.info("Loading raw data...")

    if multiprocessing.cpu_count() < 4:
        cpu_to_use = 1
    else:
        cpu_to_use = multiprocessing.cpu_count() - 2

    try:
        pool = multiprocessing.Pool(cpu_to_use)
    except AttributeError as e:
        print(
            "Error: ",
            e,
            "\n===============\nThis error is known to happen when running "
            "the code more than once from the same console in Spyder. Please "
            "run the code from a new console to prevent this error from "
            "happening. You may close the current console.\n==============",
        )
    pool_list = []

    # read in, parse data and write data
    if not mission.image.empty():
        if mission.image.format == "acfr_standard" or mission.image.format == "unagi":
            pool_list.append(
                pool.apply_async(
                    parse_acfr_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "seaxerocks_3":
            pool_list.append(
                pool.apply_async(
                    parse_seaxerocks_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif (mission.image.format == "biocam"
              or mission.image.format == "biocam4000_15c"):
            pool_list.append(
                pool.apply_async(
                    parse_biocam_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "ntnu_stereo":
            pool_list.append(
                pool.apply_async(
                    parse_ntnu_stereo_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "rosbag_extracted_images":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag_extracted_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        else:
            Console.quit("Mission image format", mission.image.format,
                         "not supported.")
    if not mission.usbl.empty():
        if mission.usbl.format == "gaps":
            pool_list.append(
                pool.apply_async(parse_gaps,
                                 [mission, vehicle, "usbl", ftype, outpath]))
        elif mission.usbl.format == "usbl_dump":
            pool_list.append(
                pool.apply_async(parse_usbl_dump,
                                 [mission, vehicle, "usbl", ftype, outpath]))
        elif mission.usbl.format == "NOC_nmea":
            pool_list.append(
                pool.apply_async(parse_NOC_nmea,
                                 [mission, vehicle, "usbl", ftype, outpath]))
        elif mission.usbl.format == "eiva_navipac":
            pool_list.append(
                pool.apply_async(
                    parse_eiva_navipac,
                    [mission, vehicle, "usbl", ftype, outpath],
                ))
        elif mission.usbl.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "usbl", ftype, outpath],
                ))
        else:
            Console.quit("Mission usbl format", mission.usbl.format,
                         "not supported.")

    if not mission.velocity.empty():
        if mission.velocity.format == "phins":
            pool_list.append(
                pool.apply_async(
                    parse_phins,
                    [mission, vehicle, "velocity", ftype, outpath]))
        elif mission.velocity.format == "ae2000":
            pool_list.append(
                pool.apply_async(
                    parse_ae2000,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "alr":
            pool_list.append(
                pool.apply_async(
                    parse_alr,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "autosub":
            pool_list.append(
                pool.apply_async(
                    parse_autosub,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "rdi":
            pool_list.append(
                pool.apply_async(
                    parse_rdi, [mission, vehicle, "velocity", ftype, outpath]))
        elif mission.velocity.format == "ntnu_dvl":
            pool_list.append(
                pool.apply_async(
                    parse_ntnu_dvl,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        else:
            Console.quit(
                "Mission velocity format",
                mission.velocity.format,
                "not supported.",
            )

    if not mission.orientation.empty():
        if mission.orientation.format == "phins":
            pool_list.append(
                pool.apply_async(
                    parse_phins,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "ae2000":
            pool_list.append(
                pool.apply_async(
                    parse_ae2000,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "alr":
            pool_list.append(
                pool.apply_async(
                    parse_alr,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "autosub":
            pool_list.append(
                pool.apply_async(
                    parse_autosub,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "rdi":
            pool_list.append(
                pool.apply_async(
                    parse_rdi,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "eiva_navipac":
            pool_list.append(
                pool.apply_async(
                    parse_eiva_navipac,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.usbl.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        else:
            Console.quit(
                "Mission orientation format",
                mission.orientation.format,
                "not supported.",
            )

    if not mission.depth.empty():
        if mission.depth.format == "phins":
            pool_list.append(
                pool.apply_async(parse_phins,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "ae2000":
            pool_list.append(
                pool.apply_async(parse_ae2000,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "alr":
            pool_list.append(
                pool.apply_async(parse_alr,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "autosub":
            pool_list.append(
                pool.apply_async(parse_autosub,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "gaps":
            pool_list.append(
                pool.apply_async(parse_gaps,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "eiva_navipac":
            pool_list.append(
                pool.apply_async(
                    parse_eiva_navipac,
                    [mission, vehicle, "depth", ftype, outpath],
                ))
        elif mission.usbl.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "depth", ftype, outpath],
                ))
        else:
            Console.quit("Mission depth format", mission.depth.format,
                         "not supported.")

    if not mission.altitude.empty():
        if mission.altitude.format == "phins":
            pool_list.append(
                pool.apply_async(
                    parse_phins,
                    [mission, vehicle, "altitude", ftype, outpath]))
        elif mission.altitude.format == "ae2000":
            pool_list.append(
                pool.apply_async(
                    parse_ae2000,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.altitude.format == "alr":
            pool_list.append(
                pool.apply_async(
                    parse_alr,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.altitude.format == "autosub":
            pool_list.append(
                pool.apply_async(
                    parse_autosub,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.altitude.format == "rdi":
            pool_list.append(
                pool.apply_async(
                    parse_rdi, [mission, vehicle, "altitude", ftype, outpath]))
        elif mission.altitude.format == "ntnu_dvl":
            pool_list.append(
                pool.apply_async(
                    parse_ntnu_dvl,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.usbl.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        else:
            Console.quit(
                "Mission altitude format",
                mission.altitude.format,
                "not supported.",
            )

    if not mission.tide.empty():
        if mission.tide.format == "NOC_polpred":
            tide_list = parse_NOC_polpred(mission, vehicle, "tide", ftype,
                                          outpath)
        else:
            Console.quit("Mission tide format", mission.tide.format,
                         "not supported.")
    else:
        tide_list = None

    pool.close()
    pool.join()

    Console.info("...done loading raw data.")
    Console.info("Compile data list...")

    data_list = [[{
        "epoch_timestamp": 0.0,
        "class": "origin",
        "category": "origin",
        "data": [{
            "latitude": mission.origin.latitude,
            "longitude": mission.origin.longitude,
            "crs": mission.origin.crs,
            "date": mission.origin.date,
        }],
    }]]

    # Decide once in advance whether timestamps need correcting, instead of
    # re-checking the image format for every entry in pool_list
    correcting_timestamps = mission.image.format == "biocam"
    for i in pool_list:
        results = i.get()
        # If current retrieved data is DEPTH
        # and if TIDE data is available
        if len(results) < 1:
            continue
        if results[0] is None:
            Console.warn(
                "Some results are empty. Please check whether this is expected."
            )
            continue
        if correcting_timestamps:
            if results[0]["category"] == "image":
                Console.info("Correction of BioCam cpu timestamps...")
                results = correct_timestamps(results)
        if (results[0]["category"] == Category.DEPTH
                or results[0]["category"] == Category.USBL):
            if not mission.tide.empty():
                # proceed to tidal correction
                Console.info("Tidal correction of depth vector...")
                # Offset depth to account for tides
                j = 0
                for k in range(len(results)):
                    while (j < len(tide_list)
                           and tide_list[j]["epoch_timestamp"] <
                           results[k]["epoch_timestamp"]):
                        j = j + 1

                    if j >= 1:
                        _result = interpolate(
                            results[k]["epoch_timestamp"],
                            tide_list[j - 1]["epoch_timestamp"],
                            tide_list[j]["epoch_timestamp"],
                            tide_list[j - 1]["data"][0]["height"],
                            tide_list[j]["data"][0]["height"],
                        )
                        if results[0]["category"] == Category.DEPTH:
                            results[k]["data"][0]["depth"] = (
                                results[k]["data"][0]["depth"] - _result)
                        elif results[0]["category"] == Category.USBL:
                            results[k]["data_target"][4]["depth"] = (
                                results[k]["data_target"][4]["depth"] -
                                _result)
        data_list.append(results)

    Console.info("...done compiling data list.")

    Console.info("Writing to output file...")
    data_list_temp = []
    for i in data_list:
        data_list_temp += i

    # create file (overwrite if exists)
    nav_file = outpath / filename
    with nav_file.open("w") as fileout:
        json.dump(data_list_temp, fileout, indent=2)
    Console.info("...done writing to output file.")

    del data_list_temp
    del data_list

    # interlace the data based on timestamps
    Console.info("Interlacing data...")
    parse_interlacer(outpath, filename)
    Console.info("...done interlacing data. Output saved to",
                 outpath / filename)
    plot_parse_data(outpath, ftype)
    Console.info("Complete parse data")
def parse_seaxerocks_images(mission, vehicle, category, ftype, outpath):
    data_list = []
    if ftype == "acfr":
        data_list = ""

    # parser meta data
    class_string = "measurement"
    frame_string = "body"
    category_stereo = "image"
    category_laser = "laser"
    sensor_string = "seaxerocks_3"

    timezone = mission.image.timezone
    timeoffset = mission.image.timeoffset
    camera1_filepath = mission.image.cameras[0].path
    camera2_filepath = mission.image.cameras[1].path
    camera3_filepath = mission.image.cameras[2].path
    camera1_label = mission.image.cameras[0].name
    camera2_label = mission.image.cameras[1].name
    camera3_label = mission.image.cameras[2].name

    epoch_timestamp_stereo = []
    epoch_timestamp_laser = []
    epoch_timestamp_camera1 = []
    epoch_timestamp_camera2 = []
    epoch_timestamp_camera3 = []
    stereo_index = []
    laser_index = []
    camera1_index = []
    camera2_index = []
    camera3_index = []
    camera1_filename = []
    camera2_filename = []
    camera3_filename = []

    camera1_serial = list(camera1_label)
    camera2_serial = list(camera2_label)
    camera3_serial = list(camera3_label)

    for i in range(1, len(camera1_label)):
        if camera1_label[i] == "/":
            camera1_serial[i] = "_"

    for i in range(1, len(camera2_label)):
        if camera2_label[i] == "/":
            camera2_serial[i] = "_"

    for i in range(1, len(camera3_label)):
        if camera3_label[i] == "/":
            camera3_serial[i] = "_"

    camera1_serial = "".join(camera1_serial)
    camera2_serial = "".join(camera2_serial)
    camera3_serial = "".join(camera3_serial)

    i = 0
    # read in timezone
    # TODO change ALL timezones to integers
    if isinstance(timezone, str):
        if timezone == "utc" or timezone == "UTC":
            timezone_offset = 0
        elif timezone == "jst" or timezone == "JST":
            timezone_offset = 9
        else:
            print(
                "Error: timezone",
                timezone,
                "in mission.cfg not recognised, please enter the offset",
                "from UTC in hours",
            )
            return
    else:
        try:
            timezone_offset = float(timezone)
        except ValueError:
            print(
                "Error: timezone",
                timezone,
                "in mission.cfg not recognised, please enter the offset",
                "from UTC in hours",
            )
            return

    # convert to seconds from utc
    # timeoffset = -timezone_offset*60*60 + timeoffset

    Console.info("  Parsing " + sensor_string + " images...")

    cam1_path = get_raw_folder(outpath / ".." / camera1_filepath / "..")
    cam1_filetime = cam1_path / "FileTime.csv"

    with cam1_filetime.open("r", encoding="utf-8", errors="ignore") as filein:
        for line in filein.readlines():
            stereo_index_timestamps = line.strip().split(",")

            index_string = stereo_index_timestamps[0]
            date_string = stereo_index_timestamps[1]
            time_string = stereo_index_timestamps[2]
            ms_time_string = stereo_index_timestamps[3]

            # read in date
            if date_string != "date":  # ignore header
                stereo_index.append(index_string)
                if len(date_string) != 8:
                    Console.warn(
                        "Date string ({}) in FileTime.csv file has "
                        "unexpected length. Expected length: 8.".format(
                            date_string))
                yyyy = int(date_string[0:4])
                mm = int(date_string[4:6])
                dd = int(date_string[6:8])

                # read in time
                if len(time_string) != 6:
                    Console.warn(
                        "Time string ({}) in FileTime.csv file has "
                        "unexpected length. Expected length: 6.".format(
                            time_string))
                hour = int(time_string[0:2])
                mins = int(time_string[2:4])
                secs = int(time_string[4:6])
                msec = int(ms_time_string[0:3])

                epoch_time = date_time_to_epoch(yyyy, mm, dd, hour, mins, secs,
                                                timezone_offset)

                epoch_timestamp_stereo.append(
                    float(epoch_time + msec / 1000 + timeoffset))

    camera1_list = ["{}.raw".format(i) for i in stereo_index]
    camera2_list = ["{}.raw".format(i) for i in stereo_index]

    for i in range(len(camera1_list)):
        camera1_image = camera1_list[i].split(".")
        camera2_image = camera2_list[i].split(".")

        camera1_index.append(camera1_image[0])
        camera2_index.append(camera2_image[0])

    j = 0
    for i in range(len(camera1_list)):
        # find the corresponding timestamp even if some images were deleted
        if camera1_index[i] == stereo_index[j]:
            epoch_timestamp_camera1.append(epoch_timestamp_stereo[j])
            epoch_timestamp_camera2.append(epoch_timestamp_stereo[j])
            date = epoch_to_day(epoch_timestamp_stereo[0])
            if ftype == "acfr":
                camera1_filename.append("sx3_" + date[2:4] + date[5:7] +
                                        date[8:10] + "_image" +
                                        str(camera1_index[i]) + "_FC.png")
                camera2_filename.append("sx3_" + date[2:4] + date[5:7] +
                                        date[8:10] + "_image" +
                                        str(camera2_index[i]) + "_AC.png")
            j = j + 1
        elif stereo_index[j] > camera1_index[i]:
            j = j + 1
        else:
            j = j - 1

    if ftype == "oplab":
        camera1_filename = [line for line in camera1_list]
        camera2_filename = [line for line in camera2_list]

    for i in range(len(camera1_list)):
        if ftype == "acfr":
            data = ("VIS: " + str(float(epoch_timestamp_camera1[i])) + " [" +
                    str(float(epoch_timestamp_camera1[i])) + "] " +
                    str(camera1_filename[i]) + " exp: 0\n")
            data_list += data
            data = ("VIS: " + str(float(epoch_timestamp_camera2[i])) + " [" +
                    str(float(epoch_timestamp_camera2[i])) + "] " +
                    str(camera2_filename[i]) + " exp: 0\n")
            data_list += data

        if ftype == "oplab":
            data = {
                "epoch_timestamp":
                float(epoch_timestamp_camera1[i]),
                "class":
                class_string,
                "sensor":
                sensor_string,
                "frame":
                frame_string,
                "category":
                category_stereo,
                "camera1": [{
                    "epoch_timestamp":
                    float(epoch_timestamp_camera1[i]),
                    "serial":
                    camera1_serial,
                    "filename":
                    str(camera1_filepath + "/" + camera1_filename[i]),
                }],
                "camera2": [{
                    "epoch_timestamp":
                    float(epoch_timestamp_camera2[i]),
                    "serial":
                    camera2_serial,
                    "filename":
                    str(camera2_filepath + "/" + camera2_filename[i]),
                }],
            }
            data_list.append(data)

    cam3_path = get_raw_folder(outpath / ".." / camera3_filepath)
    cam3_filetime = cam3_path / "FileTime.csv"
    with cam3_filetime.open("r", encoding="utf-8", errors="ignore") as filein:
        for line in filein.readlines():
            laser_index_timestamps = line.strip().split(",")

            if len(laser_index_timestamps) < 4:
                Console.warn("The laser FileTime.csv is apparently corrupt...")
                continue
            index_string = laser_index_timestamps[0]
            date_string = laser_index_timestamps[1]
            time_string = laser_index_timestamps[2]
            ms_time_string = laser_index_timestamps[3]

            # read in date
            if date_string != "date":  # ignore header
                laser_index.append(index_string)

                yyyy = int(date_string[0:4])
                mm = int(date_string[4:6])
                dd = int(date_string[6:8])

                # read in time
                hour = int(time_string[0:2])
                mins = int(time_string[2:4])
                secs = int(time_string[4:6])
                msec = int(ms_time_string[0:3])

                epoch_time = date_time_to_epoch(yyyy, mm, dd, hour, mins, secs,
                                                timezone_offset)

                epoch_timestamp_laser.append(
                    float(epoch_time + msec / 1000 + timeoffset))

    # TODO: try using pandas for all parsers; it should be faster
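    # A possible pandas-based replacement for the manual FileTime.csv parsing
    # (illustrative only, not enabled; the column names are assumptions):
    #
    #   import pandas as pd
    #   filetime_df = pd.read_csv(cam3_filetime, skiprows=1, dtype=str,
    #                             names=["index", "date", "time", "ms"])
    #
    # The date/time/ms columns could then be combined into epoch timestamps in
    # a vectorised way instead of line by line.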
    camera3_list = ["{}".format(i) for i in laser_index]

    # The LM165 images are saved either as jpg or as tif, and are split into
    # subfolders either at every 1000 or every 10000 images. Find out which
    # convention is used in current dataset by looking at the files.
    if len(camera3_list) > 0:
        s, extension = determine_extension_and_images_per_folder(
            cam3_path, camera3_list, camera3_label)

    for i in range(len(camera3_list)):
        camera3_filename.append("{}/image{}.{}".format(
            camera3_list[i][s:(s + 3)],
            camera3_list[i],
            extension,  # noqa: E203
        ))
        camera3_index.append(camera3_list[i])

    j = 0
    # original comment: find corresponding timestamp even if some images are
    # deleted
    for i in range(len(camera3_filename)):
        if camera3_index[i] == laser_index[j]:
            epoch_timestamp_camera3.append(epoch_timestamp_laser[j])
            j = j + 1
        # Jin: incomplete? This means laser data is missing for this image
        # file, so there is no epoch_timestamp for it. What should happen in
        # that case?
        elif laser_index[j] > camera3_index[i]:
            j = j + 1
        else:
            # Jin: incomplete and possibly wrong? This means this laser entry
            # is extra, with no accompanying image file, so j should probably
            # be incremented until the indices match.
            j = j - 1

        if ftype == "oplab":
            data = {
                "epoch_timestamp": float(epoch_timestamp_camera3[i]),
                "class": class_string,
                "sensor": sensor_string,
                "frame": frame_string,
                "category": category_laser,
                "serial": camera3_serial,
                "filename": camera3_filepath + "/" + str(camera3_filename[i]),
            }
            data_list.append(data)

    Console.info("  ...done parsing " + sensor_string + " images.")

    return data_list
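# The index-matching loops above (flagged in the "Jin" comments) walk two
# sorted index lists in parallel so that images deleted from disk do not shift
# the timestamp assignment. Below is a minimal, self-contained sketch of that
# two-pointer idea; it is not the parser's implementation and the names are
# illustrative only.
def match_indices_to_timestamps_sketch(image_indices, file_indices,
                                       file_timestamps):
    """Return {image_index: timestamp} for indices present in both sorted lists."""
    matched = {}
    j = 0
    for idx in image_indices:
        # skip FileTime.csv entries whose image no longer exists
        while j < len(file_indices) and file_indices[j] < idx:
            j += 1
        if j < len(file_indices) and file_indices[j] == idx:
            matched[idx] = file_timestamps[j]
            j += 1
        # otherwise there is no FileTime entry for this image: leave it out
    return matched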
def acfr_to_oplab(args):
    csv_filepath = Path(args.output_folder)
    force_overwrite = args.force
    if not args.vehicle_pose and not args.stereo_pose:
        Console.error("Please provide at least one file")
        Console.quit("Missing comandline arguments")
    if args.vehicle_pose:
        Console.info("Vehicle pose provided! Converting it to DR CSV...")
        vpp = AcfrVehiclePoseParser(args.vehicle_pose)
        dr = vpp.get_dead_reckoning()
        csv_filename = "auv_acfr_centre.csv"

        if (csv_filepath / csv_filename).exists() and not force_overwrite:
            Console.error(
                "The DR file already exists. Use -F to force overwrite it")
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, dr, csv_filename, csv_flag=True)
            Console.info("Output vehicle pose written to",
                         csv_filepath / csv_filename)
    if args.stereo_pose:
        Console.info("Stereo pose provided! Converting it to camera CSVs...")
        spp = AcfrStereoPoseParser(args.stereo_pose)
        cam1, cam2 = spp.get_cameras()
        if (csv_filepath / "auv_dr_LC.csv").exists() and not force_overwite:
            Console.error(
                "The camera files already exists. Use -F to force overwrite it"
            )
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, cam1, "auv_acfr_LC", csv_flag=True)
            write_csv(csv_filepath, cam2, "auv_acfr_RC", csv_flag=True)
            Console.info("Output camera files written at", csv_filepath)

        if args.dive_folder is None:
            return

        # If the user provides a dive path, we can interpolate the ACFR navigation
        # to the laser timestamps
        path_processed = get_processed_folder(args.dive_folder)
        nav_standard_file = path_processed / "nav" / "nav_standard.json"
        nav_standard_file = get_processed_folder(nav_standard_file)
        Console.info("Loading json file {}".format(nav_standard_file))

        start_datetime = ""
        finish_datetime = ""

        file3 = csv_filepath / "auv_acfr_laser.csv"
        if file3.exists() and not force_overwrite:
            Console.error(
                "The laser file already exists. Use -F to force overwrite it")
            Console.quit("Default behaviour: not to overwrite results")

        with nav_standard_file.open("r") as nav_standard:
            parsed_json_data = json.load(nav_standard)

            if start_datetime == "":
                epoch_start_time = epoch_from_json(parsed_json_data[1])
                start_datetime = epoch_to_datetime(epoch_start_time)
            else:
                epoch_start_time = string_to_epoch(start_datetime)
            if finish_datetime == "":
                epoch_finish_time = epoch_from_json(parsed_json_data[-1])
                finish_datetime = epoch_to_datetime(epoch_finish_time)
            else:
                epoch_finish_time = string_to_epoch(finish_datetime)

            Console.info("Interpolating laser to ACFR stereo pose data...")
            # the overwrite check for file3 ('-F') is handled above
            fileout3 = file3.open("w")
            fileout3.write(cam1[0].get_csv_header())
            for i in range(len(parsed_json_data)):
                Console.progress(i, len(parsed_json_data))
                epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
                if (epoch_timestamp >= epoch_start_time
                        and epoch_timestamp <= epoch_finish_time):
                    if "laser" in parsed_json_data[i]["category"]:
                        filename = parsed_json_data[i]["filename"]
                        c3_interp = interpolate_camera(epoch_timestamp, cam1,
                                                       filename)
                        fileout3.write(c3_interp.to_csv_row())
            Console.info("Done! Laser file available at", str(file3))
def hybis_to_oplab(args):
    if args.navigation_file is None:
        Console.error("Please provide a navigation file")
        Console.quit("Missing command line arguments")
    if args.image_path is None:
        Console.error("Please provide an image path")
        Console.quit("Missing command line arguments")
    if args.reference_lat is None:
        Console.error("Please provide a reference_lat")
        Console.quit("Missing command line arguments")
    if args.reference_lon is None:
        Console.error("Please provide a reference_lon")
        Console.quit("Missing command line arguments")
    if args.output_folder is None:
        Console.error("Please provide an output path")
        Console.quit("Missing command line arguments")
    parser = HybisParser(
        args.navigation_file,
        args.image_path,
        args.reference_lat,
        args.reference_lon,
    )
    force_overwrite = args.force
    filepath = get_processed_folder(args.output_folder)
    start_datetime = epoch_to_datetime(parser.start_epoch)
    finish_datetime = epoch_to_datetime(parser.finish_epoch)

    # make path for processed outputs
    json_filename = ("json_renav_" + start_datetime[0:8] + "_" +
                     start_datetime[8:14] + "_" + finish_datetime[0:8] + "_" +
                     finish_datetime[8:14])
    renavpath = filepath / json_filename
    if not renavpath.is_dir():
        try:
            renavpath.mkdir()
        except Exception as e:
            print("Warning:", e)
    elif renavpath.is_dir() and not force_overwrite:
        # Check if dataset has already been processed
        Console.error(
            "It looks like this dataset has already been processed for the",
            "specified time span.",
        )
        Console.error(
            "The following directory already exists: {}".format(renavpath))
        Console.error(
            "To overwrite the contents of this directory rerun auv_nav with",
            "the flag -F.",
        )
        Console.error("Example:   auv_nav process -F PATH")
        Console.quit("auv_nav process would overwrite json_renav files")

    output_dr_centre_path = renavpath / "csv" / "dead_reckoning"
    if not output_dr_centre_path.exists():
        output_dr_centre_path.mkdir(parents=True)
    camera_name = "hybis_camera"
    output_dr_centre_path = output_dr_centre_path / ("auv_dr_" + camera_name +
                                                     ".csv")
    parser.write(output_dr_centre_path)
    Console.info("Output written to", output_dr_centre_path)
def oplab_to_acfr(args):
    if not args.dive_folder:
        Console.error("No dive folder provided. Exiting...")
        Console.quit("Missing command line arguments")
    if not args.output_folder:
        Console.error("No output folder provided. Exiting...")
        Console.quit("Missing command line arguments")

    output_folder = get_processed_folder(args.output_folder)
    vf = output_folder / "vehicle_pose_est.data"
    sf = output_folder / "stereo_pose_est.data"
    if (vf.exists() or sf.exists()) and not args.force:
        Console.error(
            "Output files already exist. Use -F to overwrite. Exiting...")
        Console.quit("Default behaviour: not to overwrite results")

    Console.info("Loading mission.yaml")
    path_processed = get_processed_folder(args.dive_folder)
    mission_file = path_processed / "mission.yaml"
    mission = Mission(mission_file)

    vehicle_file = path_processed / "vehicle.yaml"
    vehicle = Vehicle(vehicle_file)

    origin_lat = mission.origin.latitude
    origin_lon = mission.origin.longitude

    json_list = list(path_processed.glob("json_*"))
    if len(json_list) == 0:
        Console.quit(
            "No navigation solution could be found. Please run ",
            "auv_nav parse and process first",
        )
    navigation_path = path_processed / json_list[0]
    # Use the EKF navigation output (csv/ekf)
    full_navigation_path = navigation_path / "csv" / "ekf"

    vpw = AcfrVehiclePoseWriter(vf, origin_lat, origin_lon)
    vehicle_navigation_file = full_navigation_path / "auv_ekf_centre.csv"
    dataframe = pd.read_csv(vehicle_navigation_file)
    dr_list = []
    for _, row in dataframe.iterrows():
        sb = SyncedOrientationBodyVelocity()
        sb.from_df(row)
        dr_list.append(sb)
    vpw.write(dr_list)
    Console.info("Vehicle pose written to", vf)

    if not mission.image.cameras:
        Console.warn("No cameras found in the mission file.")
        return
    if len(mission.image.cameras) == 1:
        Console.error("Only one camera found in the mission file. Exiting...")
        Console.quit("ACFR stereo pose est requires two cameras.")

    camera0_name = mission.image.cameras[0].name
    camera1_name = mission.image.cameras[1].name

    camera0_nav_file = full_navigation_path / ("auv_ekf_" + camera0_name +
                                               ".csv")
    dataframe = pd.read_csv(camera0_nav_file)
    cam1_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam1_list.append(sb)

    camera1_nav_file = full_navigation_path / ("auv_ekf_" + camera1_name +
                                               ".csv")

    dataframe = pd.read_csv(camera1_nav_file)
    cam2_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam2_list.append(sb)

    spw = AcfrStereoPoseWriter(sf, origin_lat, origin_lon)

    spw.write(cam1_list, cam2_list)
    Console.info("Stereo pose written to", sf)

    Console.info("Generating combined.RAW")
    nav_standard_file = path_processed / "nav" / "nav_standard.json"
    nav_standard_file = get_processed_folder(nav_standard_file)
    Console.info("Loading json file {}".format(nav_standard_file))

    with nav_standard_file.open("r") as nav_standard:
        parsed_json_data = json.load(nav_standard)

    start_datetime = ""
    finish_datetime = ""

    # setup start and finish date time
    if start_datetime == "":
        epoch_start_time = epoch_from_json(parsed_json_data[1])
        start_datetime = epoch_to_datetime(epoch_start_time)
    else:
        epoch_start_time = string_to_epoch(start_datetime)
    if finish_datetime == "":
        epoch_finish_time = epoch_from_json(parsed_json_data[-1])
        finish_datetime = epoch_to_datetime(epoch_finish_time)
    else:
        epoch_finish_time = string_to_epoch(finish_datetime)

    sensors_std = {
        "usbl": {"model": "sensor"},
        "dvl": {"model": "sensor"},
        "depth": {"model": "sensor"},
        "orientation": {"model": "sensor"},
    }

    exporter = AcfrCombinedRawWriter(mission, vehicle, output_folder)

    # read in data from json file
    # i here is the number of the data packet
    for i in range(len(parsed_json_data)):
        Console.progress(i, len(parsed_json_data))
        epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
        if epoch_start_time <= epoch_timestamp <= epoch_finish_time:
            if "velocity" in parsed_json_data[i]["category"]:
                if "body" in parsed_json_data[i]["frame"]:
                    # check for corrupted data points which carry inertial
                    # frame data values
                    if "epoch_timestamp_dvl" in parsed_json_data[i]:
                        # confirm that the dvl time stamps are aligned with
                        # the main clock (within a second)
                        if (abs(parsed_json_data[i]["epoch_timestamp"] -
                                parsed_json_data[i]["epoch_timestamp_dvl"]) <
                                1.0):
                            velocity_body = BodyVelocity()
                            velocity_body.from_json(parsed_json_data[i],
                                                    sensors_std["dvl"])
                            exporter.add(velocity_body)
                if "inertial" in parsed_json_data[i]["frame"]:
                    velocity_inertial = InertialVelocity()
                    velocity_inertial.from_json(parsed_json_data[i])
                    exporter.add(velocity_inertial)

            if "orientation" in parsed_json_data[i]["category"]:
                orientation = Orientation()
                orientation.from_json(parsed_json_data[i],
                                      sensors_std["orientation"])
                exporter.add(orientation)

            if "depth" in parsed_json_data[i]["category"]:
                depth = Depth()
                depth.from_json(parsed_json_data[i], sensors_std["depth"])
                exporter.add(depth)

            if "altitude" in parsed_json_data[i]["category"]:
                altitude = Altitude()
                altitude.from_json(parsed_json_data[i])
                exporter.add(altitude)

            if "usbl" in parsed_json_data[i]["category"]:
                usbl = Usbl()
                usbl.from_json(parsed_json_data[i], sensors_std["usbl"])
                exporter.add(usbl)

            if "image" in parsed_json_data[i]["category"]:
                camera1 = Camera()
                # LC
                camera1.from_json(parsed_json_data[i], "camera1")
                exporter.add(camera1)
                camera2 = Camera()
                camera2.from_json(parsed_json_data[i], "camera2")
                exporter.add(camera2)