def test_seconds_to_formatted_string(self):
    """Check seconds_to_formatted_string against known values, including zero and negative input."""
    cases = {
        666: '11 minutes, 6 seconds',
        66666: '18 hours, 31 minutes, 6 seconds',
        6666666: '1851 hours, 51 minutes, 6 seconds',
        0: '0 seconds',
        -1: '0 seconds',
    }
    for total_seconds, expected_text in cases.items():
        assert seconds_to_formatted_string(total_seconds) == expected_text
def _export_tracklines_to_geopackage(self, linenames: list, output_file: str):
    """
    Build a new geopackage file and create a new feature for each line, where each feature has a name of
    linename, and data according to the latitude/longitude of that line.

    Parameters
    ----------
    linenames
        list of multibeam line names to export; names not found in the converted data are skipped
    output_file
        path to the geopackage file to create/update
    """
    self.fqpr.logger.info('****Exporting tracklines to geopackage****')
    starttime = perf_counter()
    vl = VectorLayer(output_file, 'GPKG', kluster_variables.qgis_epsg, update=True)
    try:
        for line in linenames:
            if line in self.fqpr.multibeam.raw_ping[0].multibeam_files:
                # multibeam_files[line] stores [start_time, end_time, ...] for the line
                line_start_time, line_end_time = self.fqpr.multibeam.raw_ping[0].multibeam_files[line][0], \
                    self.fqpr.multibeam.raw_ping[0].multibeam_files[line][1]
                nav = self.fqpr.return_navigation(line_start_time, line_end_time)
                if nav is not None:
                    vl.write_to_layer(line,
                                      np.column_stack([nav.longitude.values, nav.latitude.values]),
                                      2)  # ogr.wkbLineString
                else:
                    # FIX: message previously referenced 'export_lines_to_geopackage', a different method
                    print(f'_export_tracklines_to_geopackage: unable to access raw navigation for line {line}')
    finally:
        # FIX: close in a finally block so the layer handle is released even if a lookup/write raises
        vl.close()
    endtime = perf_counter()
    self.fqpr.logger.info(
        '****Exporting tracklines to geopackage complete: {}****\n'.format(
            seconds_to_formatted_string(int(endtime - starttime))))
def run_patch(self):
    """
    Run the patch test procedure, saving the adjustments to the result attribute.
    """
    print('Initializing patch test for lines {}'.format(list(self.multibeam_files.keys())))
    init_start = perf_counter()
    self._build_initial_points()
    init_end = perf_counter()
    print('Initialization complete: {}'.format(seconds_to_formatted_string(int(init_end - init_start))))
    # three full iterations of the patch test pipeline
    for run_number in (1, 2, 3):
        print('****Patch run {} start****'.format(run_number))
        run_start = perf_counter()
        for step in (self._generate_rotated_points, self._grid, self._build_patch_test_values,
                     self._compute_least_squares, self._reprocess_points):
            step()
        run_end = perf_counter()
        print('****Patch run {} complete: {}****'.format(
            run_number, seconds_to_formatted_string(int(run_end - run_start))))
def export_soundings_to_file(self, datablock: list, output_directory: str = None, file_format: str = 'csv',
                             csv_delimiter=' ', filter_by_detection: bool = True, z_pos_down: bool = True):
    """
    A convenience method for exporting the data currently in the Kluster Points View to file.

    Parameters
    ----------
    datablock
        list of [sounding_id, head_index, x, y, z, tvu, rejected, pointtime, beam, linename] arrays, all of
        the same size and shape.  sounding_id is the name of the converted instance for each sounding
    output_directory
        optional, destination directory for the xyz exports, otherwise will auto export next to converted data
    file_format
        optional, destination file format, default is csv file, options include ['csv', 'las', 'entwine']
    csv_delimiter
        optional, if you choose file_format=csv, this will control the delimiter
    filter_by_detection
        optional, if True will only write soundings that are not rejected
    z_pos_down
        if True, will export soundings with z positive down (this is the native Kluster convention), only for csv export

    Returns
    -------
    list
        list of written file paths
    """
    chunksize, fldr_path, entwine_fldr_path, suffix = self._validate_export(output_directory, file_format)
    if not chunksize:
        return []
    self.fqpr.logger.info('****Exporting xyz data to {}****'.format(file_format))
    starttime = perf_counter()
    written_files = []
    if datablock:
        base_name = os.path.split(self.fqpr.multibeam.converted_pth)[1] + '_pointsview'
        try:
            sounding_id, head_index, x, y, z, tvu, rejected, pointtime, beam, linename = datablock
        except ValueError as err:
            # FIX: was a bare except with a message claiming 'length 9' and omitting head_index,
            # while the unpack above requires exactly 10 entries; now narrowed and chained
            raise ValueError('export_soundings_to_file: datablock should be length 10 with sounding_id, head_index, x, y, z, tvu, rejected, pointtime, beam, linename, found length {}'.format(len(datablock))) from err
        if not x.any():
            print('export_soundings_to_file: no sounding data provided to export')
            return written_files
        # uncertainty is only written if the tvu array actually holds data
        unc_included = bool(tvu.any())
        if filter_by_detection:
            valid_detections = rejected != 2  # 2 = rejected sounding
            x = x[valid_detections]
            y = y[valid_detections]
            z = z[valid_detections]
            if unc_included:
                tvu = tvu[valid_detections]
            rejected = rejected[valid_detections]
        if file_format in ['las', 'entwine']:
            # las/entwine conventions are z positive up
            z_pos_down = False
        if not z_pos_down:
            z = z * -1
        if file_format == 'csv':
            if suffix:
                dest_path = os.path.join(fldr_path, '{}_{}.csv'.format(base_name, suffix))
            else:
                dest_path = os.path.join(fldr_path, base_name + '.csv')
            self.fqpr.logger.info('writing to {}'.format(dest_path))
            self._csv_write(x, y, z, tvu, unc_included, dest_path, csv_delimiter)
            # FIX: written paths were never collected, despite the documented return contract
            written_files = [dest_path]
        else:
            if suffix:
                dest_path = os.path.join(fldr_path, '{}_{}.las'.format(base_name, suffix))
            else:
                dest_path = os.path.join(fldr_path, base_name + '.las')
            self.fqpr.logger.info('writing to {}'.format(dest_path))
            self._las_write(x, y, z, tvu, rejected, unc_included, dest_path)
            # FIX: written paths were never collected, despite the documented return contract
            written_files = [dest_path]
        if file_format == 'entwine':
            build_entwine_points(fldr_path, entwine_fldr_path)
            written_files = [entwine_fldr_path]
    else:
        print('export_soundings_to_file: no sounding data provided to export')
    endtime = perf_counter()
    self.fqpr.logger.info(
        '****Exporting xyz data to {} complete: {}****\n'.format(
            file_format, seconds_to_formatted_string(int(endtime - starttime))))
    return written_files
def export_pings_to_file(self, output_directory: str = None, file_format: str = 'csv', csv_delimiter=' ',
                         filter_by_detection: bool = True, z_pos_down: bool = True,
                         export_by_identifiers: bool = True):
    """
    Uses the output of georef_along_across_depth to build sounding exports.  Currently you can export to csv,
    las or entwine file formats, see file_format argument.  This will use all soundings in the dataset.

    If you export to las and want to retain rejected soundings under the noise classification, set
    filter_by_detection to False.

    Filters using the detectioninfo variable if present in multibeam and filter_by_detection is set.

    Set z_pos_down to False if you want positive up.  Otherwise you get positive down.

    entwine export will build las first, and then entwine from las

    Parameters
    ----------
    output_directory
        optional, destination directory for the xyz exports, otherwise will auto export next to converted data
    file_format
        optional, destination file format, default is csv file, options include ['csv', 'las', 'entwine']
    csv_delimiter
        optional, if you choose file_format=csv, this will control the delimiter
    filter_by_detection
        optional, if True will only write soundings that are not rejected
    z_pos_down
        if True, will export soundings with z positive down (this is the native Kluster convention), only for csv export
    export_by_identifiers
        if True, will generate separate files for each combination of serial number/sector/frequency

    Returns
    -------
    list
        list of written file paths
    """
    chunksize, fldr_path, entwine_fldr_path, suffix = self._validate_export(output_directory, file_format)
    if not chunksize:
        return []
    self.fqpr.logger.info('****Exporting xyz data to {}****'.format(file_format))
    starttime = perf_counter()
    chunk_count = 0
    written_files = []
    for rp in self.fqpr.multibeam.raw_ping:
        self.fqpr.logger.info('Operating on system {}'.format(rp.system_identifier))
        # build list of lists for the mintime and maxtime (inclusive) for each chunk, each chunk will contain
        # number of pings equal to chunksize
        # FIX: the chunk count previously divided by a hard-coded 75000 instead of chunksize, producing the
        # wrong number of chunks (missed or duplicated time ranges) whenever chunksize != 75000
        chunktimes = [[float(rp.time.isel(time=int(i * chunksize))),
                       float(rp.time.isel(time=int(min((i + 1) * chunksize - 1, rp.time.size - 1))))]
                      for i in range(int(np.ceil(rp.time.size / chunksize)))]
        for mintime, maxtime in chunktimes:
            chunk_count += 1
            if suffix:
                new_suffix = suffix + '_{}'.format(chunk_count)
            else:
                new_suffix = '{}'.format(chunk_count)
            new_files = None
            slice_rp = slice_xarray_by_dim(rp, dimname='time', start_time=mintime, end_time=maxtime)
            if file_format == 'csv':
                new_files = self._export_pings_to_csv(rp=slice_rp, output_directory=fldr_path,
                                                      suffix=new_suffix, csv_delimiter=csv_delimiter,
                                                      filter_by_detection=filter_by_detection,
                                                      z_pos_down=z_pos_down,
                                                      export_by_identifiers=export_by_identifiers)
            elif file_format in ['las', 'entwine']:
                new_files = self._export_pings_to_las(rp=slice_rp, output_directory=fldr_path,
                                                      suffix=new_suffix,
                                                      filter_by_detection=filter_by_detection,
                                                      export_by_identifiers=export_by_identifiers)
            if new_files:
                written_files += new_files
    if file_format == 'entwine':
        # entwine builds from the las files written above, in one pass at the end
        build_entwine_points(fldr_path, entwine_fldr_path)
        written_files = [entwine_fldr_path]
    endtime = perf_counter()
    self.fqpr.logger.info(
        '****Exporting xyz data to {} complete: {}****\n'.format(
            file_format, seconds_to_formatted_string(int(endtime - starttime))))
    return written_files
def export_lines_to_file(self, linenames: list = None, output_directory: str = None, file_format: str = 'csv',
                         csv_delimiter=' ', filter_by_detection: bool = True, z_pos_down: bool = True,
                         export_by_identifiers: bool = True):
    """
    Take each provided line name and export it to the file_format provided

    Parameters
    ----------
    linenames
        list of linenames that we want to export, if None this will export all lines
    output_directory
        optional, destination directory for the xyz exports, otherwise will auto export next to converted data
    file_format
        optional, destination file format, default is csv file, options include ['csv', 'las', 'entwine']
    csv_delimiter
        optional, if you choose file_format=csv, this will control the delimiter
    filter_by_detection
        optional, if True will only write soundings that are not rejected
    z_pos_down
        if True, will export soundings with z positive down (this is the native Kluster convention), only for csv export
    export_by_identifiers
        if True, will generate separate files for each combination of serial number/sector/frequency

    Returns
    -------
    list
        list of written file paths
    """
    # FIX: validate before logging the start banner, consistent with export_pings_to_file and
    # export_soundings_to_file (previously the banner was printed even when validation failed)
    chunksize, fldr_path, entwine_fldr_path, suffix = self._validate_export(output_directory, file_format)
    if not chunksize:
        return []
    self.fqpr.logger.info('****Exporting xyz data to {}****'.format(file_format))
    starttime = perf_counter()
    if linenames is None:
        linenames = list(self.fqpr.multibeam.raw_ping[0].multibeam_files.keys())
    totalfiles = []
    for linename in linenames:
        try:
            data_dict = self.fqpr.subset_variables_by_line(
                ['x', 'y', 'z', 'tvu', 'frequency', 'txsector_beam', 'detectioninfo'],
                [linename], filter_by_detection=filter_by_detection)
        except Exception:
            # FIX: narrowed from a bare except (which would also swallow KeyboardInterrupt/SystemExit);
            # presumably tvu is absent in some datasets, so retry without it — TODO confirm raised type
            data_dict = self.fqpr.subset_variables_by_line(
                ['x', 'y', 'z', 'frequency', 'txsector_beam', 'detectioninfo'],
                [linename], filter_by_detection=filter_by_detection)
        if data_dict:
            # we could get the data_dict for all lines at once, but we do it line by line to avoid memory issues
            line_rp = data_dict[linename]
            new_files = []
            if file_format == 'csv':
                new_files = self._export_pings_to_csv(rp=line_rp, output_directory=fldr_path, suffix=suffix,
                                                      csv_delimiter=csv_delimiter, filter_by_detection=False,
                                                      z_pos_down=z_pos_down,
                                                      export_by_identifiers=export_by_identifiers,
                                                      base_name=os.path.splitext(linename)[0])
            elif file_format in ['las', 'entwine']:
                new_files = self._export_pings_to_las(rp=line_rp, output_directory=fldr_path, suffix=suffix,
                                                      filter_by_detection=False,
                                                      export_by_identifiers=export_by_identifiers,
                                                      base_name=os.path.splitext(linename)[0])
            if new_files:
                totalfiles += new_files
    endtime = perf_counter()
    self.fqpr.logger.info(
        '****Exporting xyz data to {} complete: {}****\n'.format(
            file_format, seconds_to_formatted_string(int(endtime - starttime))))
    return totalfiles