def run(self):
    self.started.emit(True)
    try:
        self.new_fqprs = []
        if self.new_project_path:
            data = return_project_data(self.new_project_path)
        else:
            data = {'fqpr_paths': [], 'surface_paths': []}
            if self.force_add_fqprs:
                data['fqpr_paths'] = self.force_add_fqprs
            if self.force_add_surfaces:
                data['surface_paths'] = self.force_add_surfaces
        for pth in data['fqpr_paths']:
            fqpr_entry = reload_data(pth, skip_dask=True, silent=True, show_progress=True)
            if fqpr_entry is not None:  # fqpr instance successfully loaded
                self.new_fqprs.append(fqpr_entry)
            else:
                print('Unable to load converted data from {}'.format(pth))
        for pth in data['surface_paths']:
            surf_entry = reload_surface(pth)
            if surf_entry is not None:  # grid instance successfully loaded
                self.new_surfaces.append(surf_entry)
            else:
                print('Unable to load surface from {}'.format(pth))
    except Exception:
        self.error = True
        self.exceptiontxt = traceback.format_exc()
    self.finished.emit(True)
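
# For context, a self-contained sketch of the signal pattern run() relies on, assuming PyQt5;
# LoadWorker and everything below are toy stand-ins, not the actual Kluster worker class.
import traceback
from PyQt5.QtCore import QObject, pyqtSignal

class LoadWorker(QObject):
    started = pyqtSignal(bool)
    finished = pyqtSignal(bool)

    def __init__(self):
        super().__init__()
        self.error = False
        self.exceptiontxt = ''

    def run(self):
        self.started.emit(True)
        try:
            pass  # reload the fqpr/surface paths here, as in the snippet above
        except Exception:
            self.error = True
            self.exceptiontxt = traceback.format_exc()
        self.finished.emit(True)

worker = LoadWorker()
worker.finished.connect(lambda ok: print('load finished, error={}'.format(worker.error)))
worker.run()  # real code would execute this in a QThread instead of calling it directly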
Example #2
    def _access_processed_data(self):
        """
        Either reload (if data has already been processed once here) or process the test line
        """
        try:
            self.out = reload_data(self.datapath)
        except Exception:
            self.out = None
        if self.out:
            print('reload')
        else:
            self.out = process_multibeam(convert_multibeam(self.testfile, outfold=self.datapath),
                                         coord_system='NAD83')
            print('process')
Example #3
    def new_fqpr_path(self, fqpr_path: str):
        """
        User selected a new fqpr instance (fqpr = the converted datastore, see file_browse)
        """
        try:
            self.fqpr = reload_data(fqpr_path, skip_dask=True, silent=True)
            self.fil_text.setText(fqpr_path)

            if self.fqpr is not None:
                self.fqpr_path = fqpr_path
            else:
                self.fqpr_path = None
                self.warning_message.setText(
                    'ERROR: Invalid path to converted data store')
        except Exception:
            return

Example #4
    def test_converted_data_content(self):
        out = reload_data(self.datapath)
        ad = par3.AllRead(self.testfile)
        ad.mapfile()

        # assert that they have the same number of pings (datagram 78 = raw range and angle records)
        assert out.multibeam.raw_ping[0].time.shape[0] == ad.map.getnum(78)

        # assert that there are the same number of attitude samples (datagram 65 = attitude; each record holds many samples)
        totatt = 0
        for i in range(ad.map.getnum(65)):
            rec = ad.getrecord(65, i)
            totatt += rec.data['Time'].shape[0]
        assert out.multibeam.raw_att.time.shape[0] == totatt

        ad.close()
        out.close()
Example #5
    def add_fqpr(self, pth: Union[str, Fqpr], skip_dask: bool = False):
        """
        Add a new Fqpr object to this project.  If skip_dask is False, will auto start a new dask LocalCluster

        Parameters
        ----------
        pth
            path to the top level folder for the Fqpr project or the already loaded Fqpr instance itself
        skip_dask
            if True will skip auto starting a dask LocalCluster

        Returns
        -------
        str
            project entry in the dictionary, will be the relative path to the kluster data store from the project file
        bool
            False if the fqpr was already in the project, True if added
        """

        if isinstance(pth, str):
            fq = reload_data(pth,
                             skip_dask=skip_dask,
                             silent=True,
                             show_progress=not self.is_gui)
        else:  # pth is the new Fqpr instance, pull the actual path from the Fqpr attribution
            fq = pth
            pth = os.path.normpath(fq.multibeam.raw_ping[0].output_path)
        if fq is not None:
            if self.path is None:
                self._setup_new_project(os.path.dirname(pth))
            relpath = self.path_relative_to_project(pth)
            already_in = relpath in self.fqpr_instances
            self.fqpr_instances[relpath] = fq
            self.fqpr_attrs[relpath] = get_attributes_from_fqpr(
                fq, include_mode=False)
            self.regenerate_fqpr_lines(relpath)

            for callback in self._project_observers:
                callback(True)
            print('Successfully added {}'.format(pth))
            return relpath, already_in
        return None, False
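
# A minimal usage sketch for add_fqpr; `project` stands in for the owning project
# instance (the class is not shown in this snippet) and the path is a hypothetical placeholder.
relpath, already_in = project.add_fqpr(r"C:\data_dir\converted", skip_dask=True)
if relpath is None:
    print('unable to load converted data')
elif already_in:
    print('{} was already in the project'.format(relpath))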
Example #6
    def new_fqpr_path(self, fqpr_path: str, fqpr_loaded=None):
        """
        User selected a new fqpr instance (fqpr = the converted datastore, see file_browse)
        """
        try:
            self.basefqpr = None
            if fqpr_loaded:
                self.fqpr = fqpr_loaded
            else:
                self.fqpr = reload_data(fqpr_path, skip_dask=True, silent=True)
            self.fil_text.setText(fqpr_path)
            self.fil_text_additional.setText('')
            self.fil_text_additional.hide()
            self.browse_button_additional.hide()

            if self.fqpr is not None:
                self.fqpr_path = fqpr_path
                self.add_converted_button.setEnabled(True)
            else:
                self.fqpr_path = None
                self.add_converted_button.setEnabled(False)
                self.warning_message.setText('ERROR: Invalid path to converted data store')
        except Exception:
            return
Example #7
# Examples related to changing, subsetting, filtering and saving data, last updated 2/23/2022, Kluster 0.8.10
# uses the multibeam file hstb_kluster/test_data/0009_20170523_181119_FA2806.all
# Written by Eric Younkin

import numpy as np
from HSTB.kluster.fqpr_convenience import reload_data
from HSTB.kluster.fqpr_intelligence import intel_process
from HSTB.kluster import kluster_variables

# we start with one of the preferred processing steps from the data_processing example
_, fq = intel_process(r"C:\data_dir\0009_20170523_181119_FA2806.all")
fq = fq[0]  # intel_process returns a list (one Fqpr per sonar/day of data), take the first one
# or, if the data was converted previously, just reload it
fq = reload_data(r"C:\data_dir\em2040_40111_05_23_2017")

# Build out a polygon in geographic coordinates to just get a subset of data from this dataset (lon, lat)
polygon = np.array([[-122.47798556, 47.78949665], [-122.47798556, 47.78895117],
                    [-122.47771027, 47.78895117], [-122.47771027, 47.78949665]])
# return_soundings_in_polygon gets you the variables used in Points View; these are all 1d arrays of the same length
head, x, y, z, tvu, rejected, pointtime, beam = fq.return_soundings_in_polygon(polygon)
assert head.shape == x.shape == y.shape == z.shape == tvu.shape == rejected.shape == pointtime.shape == beam.shape
assert x.shape == (1911, )

# the rejected array is actually an array of integers, the sounding flags kluster uses to reject/accept soundings
print(kluster_variables.amplitude_detect_flag)  # added in kluster 0.8.10
print(kluster_variables.phase_detect_flag)  # added in kluster 0.8.10
print(kluster_variables.rejected_flag)
print(kluster_variables.accepted_flag)
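
# A short sketch of using these flags; assumes the rejected/z arrays returned by
# return_soundings_in_polygon above are still in scope (the mask logic is an illustration)
accepted_mask = rejected != kluster_variables.rejected_flag
print('{} of {} soundings accepted'.format(accepted_mask.sum(), rejected.size))
accepted_z = z[accepted_mask]  # depths for soundings not flagged as rejected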
Example #8
    def new_additional_fqpr_path(self, fqpr_path: str):
        """
        User wants to add an additional FQPR instance to the base one
        """
        try:
            add_fqpr = reload_data(fqpr_path, skip_dask=True, silent=True)
            self.additional_fqpr_path = fqpr_path
            self.fil_text_additional.setText(fqpr_path)

            self.load_original_fqpr()

            if add_fqpr:
                sysid = self.fqpr.multibeam.raw_ping[0].system_identifier
                new_sysid = add_fqpr.multibeam.raw_ping[0].system_identifier
                if sysid == new_sysid:  # both converted data instances must be from the same sonar (serial number)
                    sonartype = self.fqpr.multibeam.raw_ping[0].sonartype
                    new_sonartype = add_fqpr.multibeam.raw_ping[0].sonartype
                    if sonartype == new_sonartype:  # both converted data instances must be the same sonar type
                        vertref = self.fqpr.multibeam.raw_ping[0].vertical_reference
                        new_vertref = add_fqpr.multibeam.raw_ping[0].vertical_reference
                        if vertref != new_vertref:  # warn if the vertical references of the two instances differ
                            self.warning_message.setText('WARNING: The vertical reference in both converted data folders does not match.')
                        horizcrs = self.fqpr.horizontal_crs
                        new_horizcrs = add_fqpr.horizontal_crs
                        if horizcrs != new_horizcrs:
                            self.warning_message.setText('WARNING: The coordinate system in both converted data folders does not match.')
                        self.store_original_fqpr()
                        base_time = self.fqpr.multibeam.raw_ping[0].time.values
                        new_time = add_fqpr.multibeam.raw_ping[0].time.values
                        duplicates = np.intersect1d(new_time, base_time)
                        if duplicates.any():
                            self.warning_message.setText('ERROR: Found duplicate time stamps between the two converted datasets')
                            return True
                        elif base_time[-1] < new_time[0]:
                            firstfq = self.fqpr
                            secondfq = add_fqpr
                        else:
                            firstfq = add_fqpr
                            secondfq = self.fqpr

                        mfiles = deepcopy(self.fqpr.multibeam.raw_ping[0].attrs['multibeam_files'])
                        mfiles.update(add_fqpr.multibeam.raw_ping[0].attrs['multibeam_files'])
                        self.fqpr.multibeam.raw_ping[0] = xr.concat([firstfq.multibeam.raw_ping[0], secondfq.multibeam.raw_ping[0]], dim='time')
                        if len(self.fqpr.multibeam.raw_ping) == 2:
                            self.fqpr.multibeam.raw_ping[1] = xr.concat([firstfq.multibeam.raw_ping[1], secondfq.multibeam.raw_ping[1]], dim='time')
                        if len(self.fqpr.multibeam.raw_ping) > 2:
                            raise ValueError('new_additional_fqpr_path: Currently only supporting maximum of 2 heads')
                        self.fqpr.multibeam.raw_att = xr.concat([firstfq.multibeam.raw_att, secondfq.multibeam.raw_att], dim='time')
                        self.fqpr.multibeam.raw_ping[0].attrs['multibeam_files'] = mfiles
                    else:
                        self.warning_message.setText('ERROR: The sonar types must match in both converted data folders')
                        return True
                else:
                    self.warning_message.setText('ERROR: The serial numbers must match in both converted data folders')
                    return True
            else:
                self.warning_message.setText('ERROR: Invalid path to converted data store')
                return True
            return False
        except Exception:
            return True
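
# The core of the merge above is a time-ordered xr.concat; a self-contained toy
# sketch of that behavior (the datasets here are illustrative, not Kluster data)
import numpy as np
import xarray as xr

first = xr.Dataset({'z': (['time'], np.ones(3))}, coords={'time': [0, 1, 2]})
second = xr.Dataset({'z': (['time'], np.full(3, 2.0))}, coords={'time': [3, 4, 5]})
merged = xr.concat([first, second], dim='time')  # earlier dataset first keeps time monotonic
assert merged.time.values.tolist() == [0, 1, 2, 3, 4, 5]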
Example #9
            self.heave_plot.clear()
            self.heading_plot.clear()
        except IndexError:
            pass


if __name__ == '__main__':
    try:  # pyside2
        app = QtWidgets.QApplication()
    except TypeError:  # pyqt5
        app = QtWidgets.QApplication([])
    test_window = KlusterAttitudeView()

    try:
        fq = reload_data(
            r"C:\collab\dasktest\data_dir\hassler_acceptance\refsurf\converted",
            show_progress=False)
        att_dat = fq.multibeam.raw_att
    except AttributeError:  # can't find the converted data, use this test data instead
        roll_dat = np.rad2deg(np.sin(np.linspace(-np.pi, np.pi, 2000)))
        pitch_dat = np.rad2deg(np.sin(np.linspace(np.pi, -np.pi, 2000)))
        heave_dat = np.linspace(0, 1, 2000)
        heading_dat = np.linspace(0, 180, 2000)
        time_dat = np.arange(0, 2000)
        att_dat = xr.Dataset({'roll': (['time'], roll_dat),
                              'pitch': (['time'], pitch_dat),
                              'heave': (['time'], heave_dat),
                              'heading': (['time'], heading_dat)},
                             coords={'time': time_dat})
Example #10
    r"C:\collab\dasktest\data_dir\EM2040c_NRT2\0650_20180711_151518.all",
    outfold=r'C:\collab\dasktest\data_dir\outputtest\concatinorder')
fq_inorder = perform_all_processing(
    r"C:\collab\dasktest\data_dir\EM2040c_NRT2\0653_20180711_152950.all",
    outfold=r'C:\collab\dasktest\data_dir\outputtest\concatinorder')

assert np.array_equal(fq_inorder.multibeam.raw_ping[0].z,
                      fq_outoforder.multibeam.raw_ping[0].z)

###################### PATCH TEST GENERATOR ##########################

from fqpr_convenience import *
from fqpr_generation import *
from xarray_conversion import *
import matplotlib.pyplot as plt

fq = reload_data(r"C:\collab\dasktest\data_dir\EM2040\converted")
xyzrph = fq.multibeam.xyzrph
# two time ranges to reprocess: pings 0-10 and pings 50-60
subset_time = [[fq.multibeam.raw_ping[0].time.values[0],
                fq.multibeam.raw_ping[0].time.values[10]],
               [fq.multibeam.raw_ping[0].time.values[50],
                fq.multibeam.raw_ping[0].time.values[60]]]

fq, soundings = reprocess_sounding_selection(fq,
                                             new_xyzrph=xyzrph,
                                             subset_time=subset_time,
                                             turn_off_dask=True)
fig = plt.figure()
Example #11
                # save to disk
                self.fqpr.write('ping', [rp_detect.to_dataset()],
                                time_array=[rp_detect.time],
                                sys_id=rp.system_identifier,
                                skip_dask=True)
        else:  # expect that the new_status is the same size as the existing status, no subset
            for cnt, rp in enumerate(self.fqpr.multibeam.raw_ping):
                rp_detect = rp['detectioninfo'].load()  # convert to numpy and load in memory
                rp_detect[:] = self.new_status[cnt]  # overwrite with new status
                # save to disk
                self.fqpr.write('ping', [rp_detect.to_dataset()],
                                time_array=[rp_detect.time],
                                sys_id=rp.system_identifier,
                                skip_dask=True)

    def return_controls(self):
        return self.controls


if __name__ == '__main__':
    fm = FilterManager()
    print('Filters currently loaded')
    print(fm.list_filters())

    from HSTB.kluster.fqpr_convenience import reload_data
    fq = reload_data(
        r"C:\collab\dasktest\data_dir\outputtest\tj_patch_test_710")
    fq.filter.run_filter('filter_by_angle', min_angle=-45, max_angle=45)
Example #12
def accuracy_test(ref_surf: Union[str, BathyGrid],
                  fq: Union[str, Fqpr],
                  output_directory: str,
                  line_names: Union[str, list] = None,
                  ping_times: tuple = None,
                  show_plots: bool = False):
    """
    Accuracy test: takes a reference surface and accuracy test lines and creates plots of depth difference between
    surface and lines for the soundings nearest the grid nodes.  Plots are by beam/by angle averages.  This function
    will automatically determine the mode and frequency of each line in the dataset to organize the plots.

    Parameters
    ----------
    ref_surf
        a path to a bathygrid instance to load or the already loaded bathygrid instance
    fq
        a path to a fqpr instance to load or the already loaded fqpr instance
    output_directory
        str, where you want to put the plot images
    line_names
        if provided, only returns data for the given line(s); otherwise returns data for all lines
    ping_times
        time to select the dataset by, must be a tuple of (min time, max time) in utc seconds.  If None, will use
        the full min/max time of the dataset
    show_plots
        if True, will show the plots as well as save them to disk
    """

    if isinstance(fq, str):
        fq = reload_data(fq)
    if isinstance(ref_surf, str):
        ref_surf = reload_surface(ref_surf)
    _validate_accuracy_test(ref_surf, fq, line_names)
    os.makedirs(output_directory, exist_ok=True)

    grouped_datasets = {}
    print('loading data...')
    linedata = fq.subset_variables_by_line(
        ['x', 'y', 'z', 'corr_pointing_angle', 'mode', 'frequency', 'modetwo'],
        filter_by_detection=True,
        line_names=line_names,
        ping_times=ping_times)
    for mline, linedataset in linedata.items():
        unique_mode = np.unique(linedataset.mode)
        if len(unique_mode) > 1:  # if multiple modes are present, keep the most common one
            ucount = [np.count_nonzero(linedataset.mode == umode) for umode in unique_mode]
            unique_mode = [x for _, x in sorted(zip(ucount, unique_mode))][-1]
        else:
            unique_mode = unique_mode[0]
        unique_modetwo = np.unique(linedataset.modetwo)
        if len(unique_modetwo) > 1:  # if multiple modes are present, keep the most common one
            ucount = [np.count_nonzero(linedataset.modetwo == umode) for umode in unique_modetwo]
            unique_modetwo = [x for _, x in sorted(zip(ucount, unique_modetwo))][-1]
        else:
            unique_modetwo = unique_modetwo[0]
        freq_numbers = np.unique(linedataset.frequency)
        lens = np.max([len(str(freq)) for freq in freq_numbers])  # keep only the longest frequency identifiers
        freqs = [f for f in freq_numbers if len(str(f)) == lens]
        digits = -(len(str(freqs[0])) - 1)  # round to the most significant digit
        rounded_freq = np.unique([np.around(f, digits) for f in freqs])[0]
        print('{}: mode {} modetwo {} frequency {}'.format(
            mline, unique_mode, unique_modetwo, rounded_freq))
        dkey = '{}-{}-{}hz'.format(unique_mode, unique_modetwo, rounded_freq)
        if dkey not in grouped_datasets:
            grouped_datasets[dkey] = linedataset
        else:
            grouped_datasets[dkey] = xr.concat(
                [grouped_datasets[dkey], linedataset], dim='sounding')

    print('building plots...')
    for dkey, dset in grouped_datasets.items():
        depth_diff, surf_depth, soundings_beam, soundings_angle = difference_grid_and_soundings(
            ref_surf, dset)

        # for plots, we limit to a max of 30000 soundings; the plot chokes with more than that
        soundings_filter = int(np.ceil(len(soundings_beam) / 30000))
        filter_beam = soundings_beam[::soundings_filter]
        filter_angle = soundings_angle[::soundings_filter]
        filter_diff = depth_diff[::soundings_filter]
        filter_surf = surf_depth[::soundings_filter]

        d_rel_a_avg, d_rel_a_stddev, depth_offset, angbins = _acctest_generate_stats(
            filter_angle, filter_diff, bin_size=1)
        d_rel_b_avg, d_rel_b_stddev, depth_offset, beambins = _acctest_generate_stats(
            filter_beam, filter_diff, bin_size=1)

        _acctest_plots(d_rel_b_avg,
                       d_rel_b_stddev,
                       filter_beam,
                       beambins,
                       filter_diff,
                       filter_surf,
                       depth_offset,
                       mode='beam',
                       output_pth=os.path.join(output_directory,
                                               dkey + '_acc_beam.png'),
                       show=show_plots)
        _acctest_plots(d_rel_a_avg,
                       d_rel_a_stddev,
                       filter_angle,
                       angbins,
                       filter_diff,
                       filter_surf,
                       depth_offset,
                       mode='angle',
                       output_pth=os.path.join(output_directory,
                                               dkey + '_acc_angle.png'),
                       show=show_plots)
    print('Accuracy test complete.')
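
# A minimal usage sketch for accuracy_test, passing paths so the reload happens
# internally; all paths below are hypothetical placeholders
accuracy_test(r"C:\data_dir\reference_surface",
              r"C:\data_dir\converted_acclines",
              output_directory=r"C:\data_dir\accuracy_plots",
              show_plots=False)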
Example #13
# loading from a post-processed Applanix SBET file
fq = import_processed_navigation(fq, [r'C:\data_dir\sbet.out'],
                                 [r'C:\data_dir\smrmsg.out'],
                                 [r'C:\data_dir\export_log.txt'])

# now you can process the converted/imported data.  The defaults will do a full processing run on all data, but you can
#  specify a few things if you like
# the default run
fq = process_multibeam(fq)
# include another sound velocity file
fq = process_multibeam(fq, add_cast_files=r'C:\data_dir\mysvpfile.svp')
# specify coordinate system and vertical reference
fq = process_multibeam(fq, coord_system='WGS84', vert_ref='ellipse')

# reload the data later on
fq = reload_data(r"C:\data_dir\converted")
#####################################
# 2. merged lower level approach
#####################################

# all the above can be combined into the perform_all_processing command for ease of use
fq = perform_all_processing(r"C:\data_dir\0009_20170523_181119_FA2806.all",
                            navfiles=[r'C:\data_dir\sbet.out'],
                            errorfiles=[r'C:\data_dir\smrmsg.out'],
                            logfiles=[r'C:\data_dir\export_log.txt'],
                            add_cast_files=r'C:\data_dir\mysvpfile.svp',
                            coord_system='WGS84',
                            vert_ref='ellipse')
# reload the data later on
fq = reload_data(r"C:\data_dir\converted")
########################################################################
Example #14
# gdal_translate "C:\vdatum_all_20201203\vdatum\FLpensac02_8301\mllw.gtx" "C:\vdatum_all_20201203\vdatum\chart_datum_depth_rev.tif" -co APPEND_SUBDATASET=YES
# gdal_translate "C:\vdatum_all_20201203\vdatum\FLjoseph03_8301\mllw.gtx" "C:\vdatum_all_20201203\vdatum\chart_datum_depth_rev.tif" -co APPEND_SUBDATASET=YES
# gdal_translate "C:\vdatum_all_20201203\vdatum\FLGAeastshelf41_8301\mllw.gtx" "C:\vdatum_all_20201203\vdatum\chart_datum_depth_rev.tif" -co APPEND_SUBDATASET=YES
# gdal_translate "C:\vdatum_all_20201203\vdatum\FLGAeastbays31_8301\mllw.gtx" "C:\vdatum_all_20201203\vdatum\chart_datum_depth_rev.tif" -co APPEND_SUBDATASET=YES
# gdal_translate "C:\vdatum_all_20201203\vdatum\FLapalach01_8301\mllw.gtx" "C:\vdatum_all_20201203\vdatum\chart_datum_depth_rev.tif" -co APPEND_SUBDATASET=YES
# gdal_translate "C:\vdatum_all_20201203\vdatum\FLandrew02_8301\mllw.gtx" "C:\vdatum_all_20201203\vdatum\chart_datum_depth_rev.tif" -co APPEND_SUBDATASET=YES

# gdalbuildvrt -input_file_list "C:\vdatum_all_20201203\vdatum\vrt_file_list.txt" "C:\vdatum_all_20201203\vdatum\chart_datum_depth.vrt"

###############################################################################################

from HSTB.kluster.fqpr_generation import *
from HSTB.kluster.fqpr_convenience import reload_data
from HSTB.kluster.modules.georeference import *

fq = reload_data(r"C:\collab\dasktest\data_dir\outputtest\EM2040_BHII")
# bind the method arguments at module level so the georeference method body
# below can be stepped through interactively
self = fq
subset_time: list = None
prefer_pp_nav: bool = True
dump_data: bool = True
delete_futs: bool = True
vdatum_directory: str = None

self._validate_georef_xyz(subset_time, dump_data)
self.logger.info(
    '****Georeferencing sound velocity corrected beam offsets****\n')
starttime = perf_counter()

self.logger.info('Using pyproj CRS: {}'.format(
    self.horizontal_crs.to_string()))