    return 4.8


def datetime64_str(time_str: Optional[str] = None) -> np.ndarray:
    """
    Reformat time_str to ISO 8601 or to 'NaT'. Used here as input for functions that convert str to numpy.datetime64
    :param time_str: may be 'NaT'
    :return: ndarray of strings (tested for 1 element only) formatted by numpy
    """
    return np.datetime_as_string(np.datetime64(time_str, 's'))


probes = probes or range(9, 40)  # sets default range; specify your values before the '---' line

if st(1):
    # Note: cannot find additional uncorrected files for the same probe if any corrected ones already exist in the search path (move them out if needed)
    i_proc_probe = 0  # counter of processed probes
    i_proc_file = 0   # counter of processed files
    # pattern to identify only _probe_'s raw data files that need correction, e.g. '*INKL*{:0>2}*.[tT][xX][tT]':
    raw_pattern = f'*{prefix.replace("incl", "inkl").upper()}_{{:0>3}}*.[tT][xX][tT]'
    raw_parent = path_cruise / dir_incl / '_raw'
    for probe in probes:
        correct_fun = partial(correct_kondrashov_txt if prefix == 'incl' else correct_baranov_txt,
                              dir_out=raw_parent)
        raw_found = []
        if not raw_archive_name:
            raw_found = list(raw_parent.glob(raw_pattern.format(probe)))
        if not raw_found:
            # Check if corrected files generated by correct_kondrashov_txt() already exist for this probe. If so, just use them
# Stop before steps that need manual preparations (70), i.e. set end < 70 at first
# Gridding (last step) needs debugging if interactive filtering is needed
# ---------------------------------------------------------------------------------------------

# if st(1, 'Save gpx navigation to DB'):
#     # Save navigation to DB
#     gpx2h5(['',
#             '--db_path', str(path_db),
#             '--path', str(path_cruise / r'navigation\_raw\*.gpx'),
#             '--tables_list', ',navigation,',  # skip waypoints
#             '--table_prefix', r'',
#             # '--min_date', '2019-07-17T14:00:00',
#             '--b_interact', '0',
#             ])

if st(5, "Save Supervisor's navigation to DB"):
    # Save navigation to DB
    csv2h5([
        'ini/csv_nav_supervisor.ini',
        '--db_path', str(path_db),
        '--path', str(path_cruise / r'navigation\bridge\??????.txt'),
        '--table', 'navigation',  # skip waypoints
        # '--b_remove_duplicates', 'True',
        # '--csv_specific_param_dict', 'DepEcho_add:4.5',
        '--min_dict', 'DepEcho:6',
        '--b_interact', '0',
def main(new_arg=None, **kwargs): """ :param new_arg: list of strings, command line arguments :kwargs: dicts of dictcts (for each ini section): specified values overwrites ini values """ # global l cfg = cfg_from_args(my_argparser(), new_arg, **kwargs) cfg['in']['db_coefs'] = Path(cfg['in']['db_coefs']) for path_field in ['db_coefs', 'path_cruise']: if not cfg['in'][path_field].is_absolute(): cfg['in'][path_field] = ( cfg['in']['cfgFile'].parent / cfg['in'][path_field] ).resolve().absolute() # cfg['in']['cfgFile'].parent / def constant_factory(val): def default_val(): return val return default_val for lim in ('min_date', 'max_date'): cfg['filter'][lim] = defaultdict( constant_factory(cfg['filter'][lim].get( '0', cfg['filter'][lim].get(0))), cfg['filter'][lim]) l = init_logging(logging, None, None, 'INFO') #l = init_logging(logging, None, cfg['program']['log'], cfg['program']['verbose']) if True: # False. Experimental speedup but takes memory from dask.cache import Cache cache = Cache(2e9) # Leverage two gigabytes of memory cache.register() # Turn cache on globally if cfg['program']['dask_scheduler']: if cfg['program']['dask_scheduler'] == 'distributed': from dask.distributed import Client client = Client( processes=False ) # navigate to http://localhost:8787/status to see the diagnostic dashboard if you have Bokeh installed # processes=False: avoide inter-worker communication for computations releases the GIL (numpy, da.array) # without is error else: if cfg['program']['dask_scheduler'] == 'synchronous': l.warning('using "synchronous" scheduler for debugging') import dask dask.config.set(scheduler=cfg['program']['dask_scheduler']) # Run steps : st.start = cfg['program']['step_start'] st.end = cfg['program']['step_end'] st.go = True if not cfg['out'][ 'db_name']: # set name by 'path_cruise' name or parent if it has digits at start. priority for name is "*inclinometer*" for p in (lambda p: [p, p.parent])(cfg['in']['path_cruise']): m = re.match('(^[\d_]*).*', p.name) if m: break cfg['out']['db_name'] = f"{m.group(1).strip('_')}incl.h5" cfg['in']['path_cruise'].glob('*inclinometer*') dir_incl = next((d for d in cfg['in']['path_cruise'].glob('*inclinometer*') if d.is_dir()), cfg['in']['path_cruise']) db_path = dir_incl / cfg['out']['db_name'] # --------------------------------------------------------------------------------------------- def fs(probe, name): return 5 # if 'w' in name.lower(): # Baranov's wavegauge electronic # return 5 # 10 # if probe < 20 or probe in [23, 29, 30, 32, 33]: # 30 [4, 11, 5, 12] + [1, 7, 13, 30] # return 5 # if probe in [21, 25, 26] + list(range(28, 35)): # return 8.2 # return 4.8 def datetime64_str(time_str: Optional[str] = None) -> np.ndarray: """ Reformat time_str to ISO 8601 or to 'NaT'. Used here for input in funcs that converts str to numpy.datetime64 :param time_str: May be 'NaT' :return: ndarray of strings (tested for 1 element only) formatted by numpy. 
""" return np.datetime_as_string(np.datetime64(time_str, 's')) probes = cfg['in']['probes'] or range( 1, 41) # sets default range, specify your values before line --- raw_root, subs_made = re.subn('INCL_?', 'INKL_', cfg['in']['probes_prefix'].upper()) if st( 1 ): # Can not find additional not corrected files for same probe if already have any corrected in search path (move them out if need) i_proc_probe = 0 # counter of processed probes i_proc_file = 0 # counter of processed files # patten to identify only _probe_'s raw data files that need to correct '*INKL*{:0>2}*.[tT][xX][tT]': raw_parent = dir_incl / '_raw' dir_out = raw_parent / re.sub( r'[.\\/ ]', '_', cfg['in']['raw_subdir'] ) # sub replaces multilevel subdirs to 1 level that correct_fun() can only make raw_parent /= cfg['in']['raw_subdir'] for probe in probes: raw_found = [] raw_pattern_file = cfg['in']['raw_pattern'].format(prefix=raw_root, number=probe) correct_fun = partial( correct_kondrashov_txt if subs_made else correct_baranov_txt, dir_out=dir_out) # if not archive: if (not '.zip' in cfg['in']['raw_subdir'].lower() and not '.rar' in cfg['in']['raw_subdir'].lower()) or raw_parent.is_dir(): raw_found = list(raw_parent.glob(raw_pattern_file)) if not raw_found: # Check if already have corrected files for probe generated by correct_kondrashov_txt(). If so then just use them raw_found = list( raw_parent.glob( f"{cfg['in']['probes_prefix']}{probe:0>2}.txt")) if raw_found: print('corrected csv file', [r.name for r in raw_found], 'found') correct_fun = lambda x: x elif not cfg['in']['raw_subdir']: continue for file_in in (raw_found or open_csv_or_archive_of_them( raw_parent, binary_mode=False, pattern=raw_pattern_file)): file_in = correct_fun(file_in) if not file_in: continue tbl = f"{cfg['in']['probes_prefix']}{probe:0>2}" # tbl = re.sub('^((?P<i>inkl)|w)_0', lambda m: 'incl' if m.group('i') else 'w', # correct name # re.sub('^[\d_]*|\*', '', file_in.stem).lower()), # remove date-prefix if in name csv2h5( [ str( Path(__file__).parent / 'ini' / f"csv_inclin_{'Kondrashov' if subs_made else 'Baranov'}.ini" ), '--path', str(file_in), '--blocksize_int', '50_000_000', # 50Mbt '--table', tbl, '--db_path', str(db_path), # '--log', str(scripts_path / 'log/csv2h5_inclin_Kondrashov.log'), # '--b_raise_on_err', '0', # ? '--b_interact', '0', '--fs_float', f'{fs(probe, file_in.stem)}', '--dt_from_utc_seconds', str(cfg['in']['dt_from_utc'].total_seconds()), '--b_del_temp_db', '1', ] + (['--csv_specific_param_dict', 'invert_magnitometr: True'] if subs_made else ['--cols_load_list', "yyyy,mm,dd,HH,MM,SS,P,U"]), **{ 'filter': { 'min_date': cfg['filter']['min_date'][probe], 'max_date': cfg['filter']['max_date'][probe], } }) # Get coefs: l.info( f"Adding coefficients to {db_path}/{tbl} from {cfg['in']['db_coefs']}" ) try: h5copy_coef(cfg['in']['db_coefs'], db_path, tbl) except KeyError as e: # Unable to open object (component not found) l.warning( 'No coefs to copy?' ) # write some dummy coefficients to can load Veusz patterns: h5copy_coef(None, db_path, tbl, dict_matrices=dict_matrices_for_h5(tbl=tbl)) except OSError as e: l.warning( 'Not found DB with coefs?' 
) # write some dummy coefficients to can load Veusz patterns: h5copy_coef(None, db_path, tbl, dict_matrices=dict_matrices_for_h5(tbl=tbl)) i_proc_file += 1 else: print('no', raw_pattern_file, end=', ') i_proc_probe += 1 print('Ok:', i_proc_probe, 'probes,', i_proc_file, 'files processed.') # Calculate velocity and average if st(2): # if aggregate_period_s is None then not average and write to *_proc_noAvg.h5 else loading from that h5 and writing to _proc.h5 if not cfg['out']['aggregate_period_s']: cfg['out']['aggregate_period_s'] = [ None, 2, 600, 3600 if 'w' in cfg['in']['probes_prefix'] else 7200 ] if cfg['in']['azimuth_add']: if 'Lat' in cfg['in']['azimuth_add']: from datetime import datetime # add magnetic declination,° for used coordinates # todo: get time azimuth_add = mag_dec(cfg['in']['azimuth_add']['Lat'], cfg['in']['azimuth_add']['Lon'], datetime(2020, 9, 10), depth=-1) else: azimuth_add = 0 if 'constant' in cfg['in']['azimuth_add']: # and add constant. For example, subtruct declination at the calibration place if it was applied azimuth_add += cfg['in']['azimuth_add'][ 'constant'] # add -6.65644183° to account for calibration in Kaliningrad for aggregate_period_s in cfg['out']['aggregate_period_s']: if aggregate_period_s is None: db_path_in = db_path db_path_out = db_path.with_name( f'{db_path.stem}_proc_noAvg.h5') else: db_path_in = db_path.with_name(f'{db_path.stem}_proc_noAvg.h5') db_path_out = f'{db_path.stem}_proc.h5' # or separately: '_proc{aggregate_period_s}.h5' args = [ Path(incl_h5clc.__file__).with_name( f'incl_h5clc_{db_path.stem}.yaml'), # if no such file all settings are here '--db_path', str(db_path_in), # ! 'incl.*|w\d*' inclinometers or wavegauges w\d\d # 'incl09': '--tables_list', 'incl.*' if not cfg['in']['probes'] else f"incl.*(?:{'|'.join('{:0>2}'.format(p) for p in cfg['in']['probes'])})", '--aggregate_period', f'{aggregate_period_s}S' if aggregate_period_s else '', '--out.db_path', str(db_path_out), '--table', f'V_incl_bin{aggregate_period_s}' if aggregate_period_s else 'V_incl', '--verbose', 'INFO', #'DEBUG' get many numba messages '--b_del_temp_db', '1', # '--calc_version', 'polynom(force)', # depreshiated # '--chunksize', '20000', # '--not_joined_h5_path', f'{db_path.stem}_proc.h5', ] # if aggregate_period_s <= 5: # [s], do not need split csv for big average interval # args += (['--split_period', '1D']) if aggregate_period_s is None: # proc. parameters (if we have saved proc. 
data then when aggregating we are not processing) args += ([ '--max_dict', 'M[xyz]:4096', # Note: for Baranov's prog 4096 is not suited # '--timerange_zeroing_dict', "incl19: '2019-11-10T13:00:00', '2019-11-10T14:00:00'\n," # not works - use kwarg # '--timerange_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00' '--split_period', '1D' ] if subs_made else [ '--bad_p_at_bursts_starts_peroiod', '1H', ]) # csv splitted by 1day (default for no avg) and monolith csv if aggregate_period_s==600 if aggregate_period_s not in cfg['out'][ 'aggregate_period_s_not_to_text']: # , 300, 600]: args += ['--text_path', str(db_path.parent / 'text_output')] kwarg = { 'in': { 'min_date': cfg['filter']['min_date'][0], 'max_date': cfg['filter']['max_date'][0], 'timerange_zeroing': cfg['in']['timerange_zeroing'], 'azimuth_add': azimuth_add } } # If need all data to be combined one after one: # set_field_if_no(kwarg, 'in', {}) # kwarg['in'].update({ # # 'tables': [f'incl{i:0>2}' for i in min_date.keys() if i!=0], # 'dates_min': min_date.values(), # in table list order # 'dates_max': max_date.values(), # # }) # set_field_if_no(kwarg, 'out', {}) # kwarg['out'].update({'b_all_to_one_col': 'True'}) incl_h5clc.main(args, **kwarg) # Calculate spectrograms. if st(3): # Can be done at any time after step 1 def raise_ni(): raise NotImplementedError( 'Can not proc probes having different fs in one run: you need to do it separately' ) args = [ Path(incl_h5clc.__file__).with_name( f'incl_h5spectrum{db_path.stem}.yaml'), # if no such file all settings are here '--db_path', str(db_path.with_name(f'{db_path.stem}_proc_noAvg.h5')), '--tables_list', f"{cfg['in']['probes_prefix']}.*", # inclinometers or wavegauges w\d\d ## 'w02', 'incl.*', # '--aggregate_period', f'{aggregate_period_s}S' if aggregate_period_s else '', '--min_date', datetime64_str(cfg['filter']['min_date'][0]), '--max_date', datetime64_str(cfg['filter']['max_date'] [0]), # '2019-09-09T16:31:00', #17:00:00 # '--max_dict', 'M[xyz]:4096', # use if db_path is not ends with _proc_noAvg.h5 i.e. 
need calc velocity '--out.db_path', f"{db_path.stem.replace('incl', cfg['in']['probes_prefix'])}_proc_psd.h5", # '--table', f'psd{aggregate_period_s}' if aggregate_period_s else 'psd', '--fs_float', f"{fs(probes[0], cfg['in']['probes_prefix'])}", # (lambda x: x == x[0])(np.vectorize(fs)(probes, prefix))).all() else raise_ni() # # '--timerange_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00' # '--verbose', 'DEBUG', # '--chunksize', '20000', '--b_interact', '0', ] if 'w' in cfg['in']['probes_prefix']: args += [ '--split_period', '1H', '--dt_interval_minutes', '10', # burst mode '--fmin', '0.0001', '--fmax', '4' ] else: args += [ '--split_period', '2H', '--fmin', '0.0004', #0.0004 '--fmax', '1.05' ] incl_h5spectrum.main(args) # Draw in Veusz if st(4): b_images_only = True # False pattern_path = db_path.parent / r'vsz_5min\191119_0000_5m_incl19.vsz' # r'vsz_5min\191126_0000_5m_w02.vsz' if not b_images_only: pattern_bytes_slice_old = re.escape(b'((5828756, 5830223, None),)') # Length of not adjacent intervals, s (set None to not allow) period = '1D' length = '5m' # period # '1D' dt_custom_s = pd_period_to_timedelta( length) if length != period else None # None # 60 * 5 if True: # Load starts and assign ends t_intervals_start = pd.read_csv( cfg['in']['path_cruise'] / r'vsz+h5_proc\intervals_selected.txt', converters={ 'time_start': lambda x: np.datetime64(x, 'ns') }, index_col=0).index edges = (pd.DatetimeIndex(t_intervals_start), pd.DatetimeIndex(t_intervals_start + dt_custom_s) ) # np.zeros_like() else: # Generate periodic intervals t_interval_start, t_intervals_end = intervals_from_period( datetime_range=np.array( [ cfg['filter']['min_date']['0'], cfg['filter']['max_date']['0'] ], # ['2018-08-11T18:00:00', '2018-09-06T00:00:00'], # ['2019-02-11T13:05:00', '2019-03-07T11:30:00'], # ['2018-11-16T15:19', '2018-12-14T14:35'], # ['2018-10-22T12:30', '2018-10-27T06:30:00'], 'datetime64[s]'), period=period) edges = (pd.DatetimeIndex([t_interval_start ]).append(t_intervals_end[:-1]), pd.DatetimeIndex(t_intervals_end)) for i, probe in enumerate(probes): probe_name = f"{cfg['in']['probes_prefix']}{probe:02}" # table name in db l.info('Draw %s in Veusz: %d intervals...', probe_name, edges[0].size) # for i_interval, (t_interval_start, t_interval_end) in enumerate(zip(pd.DatetimeIndex([t_interval_start]).append(t_intervals_end[:-1]), t_intervals_end), start=1): cfg_vp = {'veusze': None} for i_interval, (t_interval_start, t_interval_end) in enumerate(zip(*edges), start=1): # if i_interval < 23: #<= 0: # TEMPORARY Skip this number of intervals # continue if period != length: t_interval_start = t_interval_end - pd.Timedelta( dt_custom_s, 's') try: # skipping absent probes start_end = h5q_interval2coord( db_path=str(db_path), table=f'/{probe_name}', t_interval=(t_interval_start, t_interval_end)) if not len(start_end): break # no data except KeyError: break # device name not in specified range, go to next name pattern_path_new = pattern_path.with_name( f"{t_interval_start:%y%m%d_%H%M}_{length}_{probe_name}.vsz" ) # Modify pattern file if not b_images_only: probe_name_old = re.match('.*((?:incl|w)\d*).*', pattern_path.name).groups()[0] bytes_slice = bytes( '(({:d}, {:d}, None),)'.format(*(start_end + np.int32([-1, 1]))), 'ascii') def f_replace(line): """ Replace in file 1. probe name 2. 
slice """ # if i_interval == 1: line, ok = re.subn(bytes(probe_name_old, 'ascii'), bytes(probe_name, 'ascii'), line) if ok: # can be only in same line line = re.sub(pattern_bytes_slice_old, bytes_slice, line) return line if not rep_in_file(pattern_path, pattern_path_new, f_replace=f_replace): l.warning('Veusz pattern not changed!') # break elif cfg_vp['veusze']: cfg_vp['veusze'].Load(str(pattern_path_new)) elif cfg_vp['veusze']: cfg_vp['veusze'].Load(str(pattern_path_new)) txt_time_range = \ """ "[['{:%Y-%m-%dT%H:%M}', '{:%Y-%m-%dT%H:%M}']]" \ """.format(t_interval_start, t_interval_end) print(f'{i_interval}. {txt_time_range}', end=' ') cfg_vp = veuszPropagate.main( [ Path(veuszPropagate.__file__).parent.with_name( 'veuszPropagate.ini'), # '--data_yield_prefix', '-', '--path', str( db_path ), # use for custom loading from db and some source is required '--tables_list', f'/{probe_name}', # 181022inclinometers/ \d* '--pattern_path', str(pattern_path_new), # fr'd:\workData\BalticSea\190801inclinometer_Schuka\{probe_name}_190807_1D.vsz', # str(db_path.parent / dir_incl / f'{probe_name}_190211.vsz'), #warning: create file with small name # '--before_next', 'restore_config', # '--add_to_filename', f"_{t_interval_start:%y%m%d_%H%M}_{length}", '--filename_fun', f'lambda tbl: "{pattern_path_new.name}"', '--add_custom_list', 'USEtime', # nAveragePrefer', '--add_custom_expressions_list', txt_time_range, # + """ # ", 5" # """, '--b_update_existed', 'True', '--export_pages_int_list', '1, 2', # 0 for all '6, 7, 8', #'1, 2, 3' # '--export_dpi_int', '200', '--export_format', 'emf', '--b_interact', '0', '--b_images_only', f'{b_images_only}', '--return', '<embedded_object>', # reuse to not bloat memory ], veusze=cfg_vp['veusze'])
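
# --- Illustrative sketch (not part of the processing pipeline) ----------------------------------
# The per-probe filter limits above are built with defaultdict + constant_factory so that any
# probe number without an explicit 'min_date'/'max_date' entry falls back to the value stored
# under key 0. A minimal, self-contained example with hypothetical dates:
from collections import defaultdict


def constant_factory(val):
    def default_val():
        return val
    return default_val


min_date_example = defaultdict(constant_factory('2020-09-10T00:00'), {7: '2020-09-12T06:00'})
assert min_date_example[7] == '2020-09-12T06:00'  # explicit per-probe value
assert min_date_example[3] == '2020-09-10T00:00'  # any other probe gets the default from key 0
# -------------------------------------------------------------------------------------------------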
def main(config: ConfigType) -> None: """ ---------------------------- Save data to Pandas HDF5 store*.h5 ---------------------------- The store contains tables for each device and each device table contains log with metadata of recording sessions :param config: with fields: - in - mapping with fields: - tables_log: - log table name or pattern str for it: in pattern '{}' will be replaced by data table name - cols_good_data: - ['dt_from_utc', 'db', 'db_path', 'table_nav'] - out - mapping with fields: - cols: can use i - data row number and i_log_row - log row number that is used to load data range - cols_log: can use i - log row number - text_date_format - file_name_fun, file_name_fun_log - {fun} part of "lambda rec_num, t_st, t_en: {fun}" string to compile function for name of data and log text files - sep """ global cfg cfg = to_vaex_hdf5.cfg_dataclasses.main_init(config, cs_store_name) cfg_in = cfg.pop('input') cfg_in['cfgFile'] = cs_store_name cfg['in'] = cfg_in # try: # cfg = to_vaex_hdf5.cfg_dataclasses.main_init_input_file(cfg, cs_store_name, ) # except Ex_nothing_done: # pass # existed db is not mandatory device_path, cfg['out']['db_path'] = device_in_out_paths( db_path=cfg['out'].get('db_path'), path_cruise=cfg['in']['path_cruise'], device_short_name=cfg['in']['probes_prefix'], device_dir_pattern='*inclinometer*') out = cfg['out'] # h5init(cfg['in'], out) probes = cfg['in']['probes'] or range( 1, 41) # sets default range, specify your values before line --- raw_root, probe_is_incl = re.subn('INCL_?', 'INKL_', cfg['in']['probes_prefix'].upper()) # some parameters that depends of probe type (indicated by probes_prefix) p_type = defaultdict( # baranov's format constant_factory({ 'correct_fun': partial(correct_txt, mod_file_name=mod_incl_name, sub_str_list=[ b'^\r?(?P<use>20\d{2}(\t\d{1,2}){5}(\t\d{5}){8}).*', b'^.+' ]), 'fs': 10, 'format': 'Baranov', }), { 'incl': { 'correct_fun': partial( correct_txt, mod_file_name=mod_incl_name, sub_str_list=[ b'^(?P<use>20\d{2}(,\d{1,2}){5}(,\-?\d{1,6}){6}(,\d{1,2}\.\d{2})(,\-?\d{1,3}\.\d{2})).*', b'^.+' ]), 'fs': 5, 'format': 'Kondrashov', }, 'voln': { 'correct_fun': partial( correct_txt, mod_file_name=mod_incl_name, sub_str_list=[ b'^(?P<use>20\d{2}(,\d{1,2}){5}(,\-?\d{1,8})(,\-?\d{1,2}\.\d{2}){2}).*', b'^.+' ]), 'fs': 5, #'tbl_prefix': 'w', 'format': 'Kondrashov', } }) if st(1, 'Save inclinometer or wavegage data from ASCII to HDF5'): # Note: Can not find additional not corrected files for same probe if already have any corrected in search path (move them out if need) i_proc_probe = 0 # counter of processed probes i_proc_file = 0 # counter of processed files # patten to identify only _probe_'s raw data files that need to correct '*INKL*{:0>2}*.[tT][xX][tT]': raw_parent = dir_incl / '_raw' # raw_parent /= if cfg['in']['raw_subdir'] is None: cfg['in']['raw_subdir'] = '' dir_out = raw_parent / re.sub(r'[.\\/ *?]', '_', cfg['in']['raw_subdir']) # sub replaces multilevel subdirs to 1 level that correct_fun() can only make def dt_from_utc_2000(probe): """ Correct time of probes started without time setting. 
Raw date must start from 2000-01-01T00:00""" return ( datetime(year=2000, month=1, day=1) - cfg['in']['time_start_utc'][probe] ) if cfg['in']['time_start_utc'].get(probe) else timedelta(0) # convert cfg['in']['dt_from_utc'] keys to int cfg['in']['dt_from_utc'] = { int(p): v for p, v in cfg['in']['dt_from_utc'].items() } # convert cfg['in']['t_start_utc'] to cfg['in']['dt_from_utc'] and keys to int cfg['in']['dt_from_utc'].update( # overwriting the 'time_start_utc' where already exist {int(p): dt_from_utc_2000(p) for p, v in cfg['in']['time_start_utc'].items()} ) # make cfg['in']['dt_from_utc'][0] be default value cfg['in']['dt_from_utc'] = defaultdict( constant_factory(cfg['in']['dt_from_utc'].pop(0, timedelta(0))), cfg['in']['dt_from_utc']) for probe in probes: raw_found = [] raw_pattern_file = str( Path(glob.escape(cfg['in']['raw_subdir'])) / cfg['in']['raw_pattern'].format(prefix=raw_root, number=probe)) correct_fun = p_type[cfg['in']['probes_prefix']]['correct_fun'] # if not archive: if (not re.match(r'.*(\.zip|\.rar)$', cfg['in']['raw_subdir'], re.IGNORECASE)) and raw_parent.is_dir(): raw_found = list(raw_parent.glob(raw_pattern_file)) if not raw_found: # Check if already have corrected files for probe generated by correct_txt(). If so then just use them raw_found = list( dir_out.glob( f"{cfg['in']['probes_prefix']}{probe:0>2}.txt")) if raw_found: print('corrected csv file', [r.name for r in raw_found], 'found') correct_fun = lambda x, dir_out: x elif not cfg['in']['raw_subdir']: continue for file_in in (raw_found or open_csv_or_archive_of_them( raw_parent, binary_mode=False, pattern=raw_pattern_file)): file_in = correct_fun(file_in, dir_out=dir_out) if not file_in: continue tbl = file_in.stem # f"{cfg['in']['probes_prefix']}{probe:0>2}" # tbl = re.sub('^((?P<i>inkl)|w)_0', lambda m: 'incl' if m.group('i') else 'w', # correct name # re.sub('^[\d_]*|\*', '', file_in.stem).lower()), # remove date-prefix if in name csv2h5( [ str( Path(__file__).parent / 'ini' / f"csv_{'inclin' if probe_is_incl else 'wavegage'}_{p_type[cfg['in']['probes_prefix']]['format']}.ini" ), '--path', str(file_in), '--blocksize_int', '50_000_000', # 50Mbt '--table', tbl, '--db_path', str(db_path), # '--log', str(scripts_path / 'log/csv2h5_inclin_Kondrashov.log'), # '--b_raise_on_err', '0', # ? '--b_interact', '0', '--fs_float', str(p_type[cfg['in']['probes_prefix']] ['fs']), #f'{fs(probe, file_in.stem)}', '--dt_from_utc_seconds', str(cfg['in']['dt_from_utc'][probe].total_seconds()), '--b_del_temp_db', '1', ] + (['--csv_specific_param_dict', 'invert_magnitometr: True'] if probe_is_incl else []), **{ 'filter': { 'min_date': cfg['filter']['min_date'].get( probe, np.datetime64(0, 'ns')), 'max_date': cfg['filter']['max_date'].get( probe, np.datetime64('now', 'ns') ), # simple 'now' works in sinchronious mode } }) # Get coefs: l.info( f"Adding coefficients to {db_path}/{tbl} from {cfg['in']['db_coefs']}" ) try: h5copy_coef(cfg['in']['db_coefs'], db_path, tbl) except KeyError as e: # Unable to open object (component not found) l.warning( 'No coefs to copy?' ) # write some dummy coefficients to can load Veusz patterns: h5copy_coef(None, db_path, tbl, dict_matrices=dict_matrices_for_h5(tbl=tbl)) except OSError as e: l.warning( 'Not found DB with coefs?' 
) # write some dummy coefficients to can load Veusz patterns: h5copy_coef(None, db_path, tbl, dict_matrices=dict_matrices_for_h5(tbl=tbl)) i_proc_file += 1 else: print('no', raw_pattern_file, end=', ') i_proc_probe += 1 print('Ok:', i_proc_probe, 'probes,', i_proc_file, 'files processed.') cfg_in['tables'] = ['incl30'] from inclinometer.incl_h5clc import h5_names_gen from inclinometer.h5inclinometer_coef import rot_matrix_x, rot_matrix_y #rotate_x, rotate_y # R*[xyz]. As we next will need apply coefs Ag = Rz*Ry*Rx we can incorporate this # operation by precalculate it adding known angles on each axes to Rz,Ry,Rx. # If rotation is 180 deg, then we can add it only to Rx. Modified coef: Ag_new = Rz*Ry*R(x+180) # R(x+180) = Rx*Rx180 equivalent to rotate Ag.T in opposite direction: # Ag_new = rotate_x() # inclinometer changed so that applying coefs returns rotated data fiels vectors: # Out_rotated = Ag * In # We rotate it back: # Out = rotate(Out_rotated) = # after angle after calibration to some angle P so determine angle relative to vertical # by rotate data vector in opposite dir: Out = Ag * R_back * In. This equivalent to have new coef by apply rotation to Ag: # Ag_new = Ag * R_back = (R_back.T * Ag.T).T = rotate_forward(Ag.T).T = # Applying calibration coef will get data in inverted basis so we need rotate it after: # # coefs['Ag'] = rotate_x(coefs['Ag'], angle_degrees=180) # coefs['Ah'] = rotate_x(coefs['Ah'], angle_degrees=180) # dfLogOld, cfg_out['db'], cfg_out['b_skip_if_up_to_date'] = h5temp_open(**cfg_out) for i1, (tbl, coefs) in enumerate(h5_names_gen(cfg_in), start=1): # using property of rotation around same axis: R(x, θ1)@R(x, θ2) = R(x, θ1 + θ2) coefs['Ag'] = coefs['Ag'] @ rot_matrix_x(np.cos(np.pi), np.sin(np.pi)) coefs['Ah'] = coefs['Ah'] @ rot_matrix_x(np.cos(np.pi), np.sin(np.pi)) coefs['azimuth_shift_deg'] = 180 h5copy_coef(None, cfg['out']['db_path'], tbl, dict_matrices=dict_matrices_for_h5(coefs, tbl, to_nested_keys=True)) # Calculate velocity and average if st(2): # if aggregate_period_s is None then not average and write to *_proc_noAvg.h5 else loading from that h5 and writing to _proc.h5 if not cfg['out']['aggregate_period_s']: cfg['out']['aggregate_period_s'] = [ None, 2, 600, 3600 if 'w' in cfg['in']['probes_prefix'] else 7200 ] if cfg['in']['azimuth_add']: if 'Lat' in cfg['in']['azimuth_add']: from datetime import datetime # add magnetic declination,° for used coordinates # todo: get time azimuth_add = mag_dec(cfg['in']['azimuth_add']['Lat'], cfg['in']['azimuth_add']['Lon'], datetime(2020, 9, 10), depth=-1) else: azimuth_add = 0 if 'constant' in cfg['in']['azimuth_add']: # and add constant. For example, subtruct declination at the calibration place if it was applied azimuth_add += cfg['in']['azimuth_add'][ 'constant'] # add -6.65644183° to account for calibration in Kaliningrad for aggregate_period_s in cfg['out']['aggregate_period_s']: if aggregate_period_s is None: db_path_in = db_path db_path_out = db_path.with_name( f'{db_path.stem}_proc_noAvg.h5') else: db_path_in = db_path.with_name(f'{db_path.stem}_proc_noAvg.h5') db_path_out = f'{db_path.stem}_proc.h5' # or separately: '_proc{aggregate_period_s}.h5' args = [ Path(incl_h5clc.__file__).with_name( f'incl_h5clc_{db_path.stem}.yaml'), # if no such file all settings are here '--db_path', str(db_path_in), # ! 
'incl.*|w\d*' inclinometers or wavegauges w\d\d # 'incl09': '--tables_list', 'incl.*' if not cfg['in']['probes'] else f"incl.*(?:{'|'.join('{:0>2}'.format(p) for p in cfg['in']['probes'])})", '--aggregate_period', f'{aggregate_period_s}S' if aggregate_period_s else '', '--out.db_path', str(db_path_out), '--table', f'V_incl_bin{aggregate_period_s}' if aggregate_period_s else 'V_incl', '--verbose', 'INFO', #'DEBUG' get many numba messages '--b_del_temp_db', '1', # '--calc_version', 'polynom(force)', # depreshiated # '--chunksize', '20000', # '--not_joined_h5_path', f'{db_path.stem}_proc.h5', ] # if aggregate_period_s <= 5: # [s], do not need split csv for big average interval # args += (['--split_period', '1D']) if aggregate_period_s is None: # proc. parameters (if we have saved proc. data then when aggregating we are not processing) args += ([ '--max_dict', 'M[xyz]:4096', # Note: for Baranov's prog 4096 is not suited # '--time_range_zeroing_dict', "incl19: '2019-11-10T13:00:00', '2019-11-10T14:00:00'\n," # not works - use kwarg # '--time_range_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00' '--split_period', '1D' ] if subs_made else [ '--bad_p_at_bursts_starts_peroiod', '1H', ]) # csv splitted by 1day (default for no avg) and monolith csv if aggregate_period_s==600 if aggregate_period_s not in cfg['out'][ 'aggregate_period_s_not_to_text']: # , 300, 600]: args += ['--text_path', str(db_path.parent / 'text_output')] kwarg = { 'in': { 'min_date': cfg['filter']['min_date'][0], 'max_date': cfg['filter']['max_date'][0], 'time_range_zeroing': cfg['in']['time_range_zeroing'], 'azimuth_add': azimuth_add } } # If need all data to be combined one after one: # set_field_if_no(kwarg, 'in', {}) # kwarg['in'].update({ # # 'tables': [f'incl{i:0>2}' for i in min_date.keys() if i!=0], # 'dates_min': min_date.values(), # in table list order # 'dates_max': max_date.values(), # # }) # set_field_if_no(kwarg, 'out', {}) # kwarg['out'].update({'b_all_to_one_col': 'True'}) incl_h5clc.main(args, **kwarg) # Calculate spectrograms. if st(3): # Can be done at any time after step 1 def raise_ni(): raise NotImplementedError( 'Can not proc probes having different fs in one run: you need to do it separately' ) args = [ Path(incl_h5clc.__file__).with_name( f'incl_h5spectrum{db_path.stem}.yaml'), # if no such file all settings are here '--db_path', str(db_path.with_name(f'{db_path.stem}_proc_noAvg.h5')), '--tables_list', f"{cfg['in']['probes_prefix']}.*", # inclinometers or wavegauges w\d\d ## 'w02', 'incl.*', # '--aggregate_period', f'{aggregate_period_s}S' if aggregate_period_s else '', '--min_date', datetime64_str(cfg['filter']['min_date'][0]), '--max_date', datetime64_str(cfg['filter']['max_date'] [0]), # '2019-09-09T16:31:00', #17:00:00 # '--max_dict', 'M[xyz]:4096', # use if db_path is not ends with _proc_noAvg.h5 i.e. 
need calc velocity '--out.db_path', f"{db_path.stem.replace('incl', cfg['in']['probes_prefix'])}_proc_psd.h5", # '--table', f'psd{aggregate_period_s}' if aggregate_period_s else 'psd', '--fs_float', f"{fs(probes[0], cfg['in']['probes_prefix'])}", # (lambda x: x == x[0])(np.vectorize(fs)(probes, prefix))).all() else raise_ni() # # '--time_range_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00' # '--verbose', 'DEBUG', # '--chunksize', '20000', '--b_interact', '0', ] if 'w' in cfg['in']['probes_prefix']: args += [ '--split_period', '1H', '--dt_interval_minutes', '10', # burst mode '--fmin', '0.0001', '--fmax', '4' ] else: args += [ '--split_period', '2H', '--fmin', '0.0004', #0.0004 '--fmax', '1.05' ] incl_h5spectrum.main(args) # Draw in Veusz if st(4): b_images_only = True # False pattern_path = db_path.parent / r'vsz_5min\191119_0000_5m_incl19.vsz' # r'vsz_5min\191126_0000_5m_w02.vsz' if not b_images_only: pattern_bytes_slice_old = re.escape(b'((5828756, 5830223, None),)') # Length of not adjacent intervals, s (set None to not allow) period = '1D' length = '5m' # period # '1D' dt_custom_s = pd_period_to_timedelta( length) if length != period else None # None # 60 * 5 if True: # Load starts and assign ends t_intervals_start = pd.read_csv( cfg['in']['path_cruise'] / r'vsz+h5_proc\intervals_selected.txt', converters={ 'time_start': lambda x: np.datetime64(x, 'ns') }, index_col=0).index edges = (pd.DatetimeIndex(t_intervals_start), pd.DatetimeIndex(t_intervals_start + dt_custom_s) ) # np.zeros_like() else: # Generate periodic intervals t_interval_start, t_intervals_end = intervals_from_period( datetime_range=np.array( [ cfg['filter']['min_date']['0'], cfg['filter']['max_date']['0'] ], # ['2018-08-11T18:00:00', '2018-09-06T00:00:00'], # ['2019-02-11T13:05:00', '2019-03-07T11:30:00'], # ['2018-11-16T15:19', '2018-12-14T14:35'], # ['2018-10-22T12:30', '2018-10-27T06:30:00'], 'datetime64[s]'), period=period) edges = (pd.DatetimeIndex([t_interval_start ]).append(t_intervals_end[:-1]), pd.DatetimeIndex(t_intervals_end)) for i, probe in enumerate(probes): probe_name = f"{cfg['in']['probes_prefix']}{probe:02}" # table name in db l.info('Draw %s in Veusz: %d intervals...', probe_name, edges[0].size) # for i_interval, (t_interval_start, t_interval_end) in enumerate(zip(pd.DatetimeIndex([t_interval_start]).append(t_intervals_end[:-1]), t_intervals_end), start=1): cfg_vp = {'veusze': None} for i_interval, (t_interval_start, t_interval_end) in enumerate(zip(*edges), start=1): # if i_interval < 23: #<= 0: # TEMPORARY Skip this number of intervals # continue if period != length: t_interval_start = t_interval_end - pd.Timedelta( dt_custom_s, 's') try: # skipping absent probes start_end = h5q_interval2coord( db_path=str(db_path), table=f'/{probe_name}', t_interval=(t_interval_start, t_interval_end)) if not len(start_end): break # no data except KeyError: break # device name not in specified range, go to next name pattern_path_new = pattern_path.with_name( f"{t_interval_start:%y%m%d_%H%M}_{length}_{probe_name}.vsz" ) # Modify pattern file if not b_images_only: probe_name_old = re.match('.*((?:incl|w)\d*).*', pattern_path.name).groups()[0] bytes_slice = bytes( '(({:d}, {:d}, None),)'.format(*(start_end + np.int32([-1, 1]))), 'ascii') def f_replace(line): """ Replace in file 1. probe name 2. 
slice """ # if i_interval == 1: line, ok = re.subn(bytes(probe_name_old, 'ascii'), bytes(probe_name, 'ascii'), line) if ok: # can be only in same line line = re.sub(pattern_bytes_slice_old, bytes_slice, line) return line if not rep_in_file(pattern_path, pattern_path_new, f_replace=f_replace): l.warning('Veusz pattern not changed!') # break elif cfg_vp['veusze']: cfg_vp['veusze'].Load(str(pattern_path_new)) elif cfg_vp['veusze']: cfg_vp['veusze'].Load(str(pattern_path_new)) txt_time_range = \ """ "[['{:%Y-%m-%dT%H:%M}', '{:%Y-%m-%dT%H:%M}']]" \ """.format(t_interval_start, t_interval_end) print(f'{i_interval}. {txt_time_range}', end=' ') cfg_vp = veuszPropagate.main( [ Path(veuszPropagate.__file__).parent.with_name( 'veuszPropagate.ini'), # '--data_yield_prefix', '-', '--path', str( db_path ), # use for custom loading from db and some source is required '--tables_list', f'/{probe_name}', # 181022inclinometers/ \d* '--pattern_path', str(pattern_path_new), # fr'd:\workData\BalticSea\190801inclinometer_Schuka\{probe_name}_190807_1D.vsz', # str(db_path.parent / dir_incl / f'{probe_name}_190211.vsz'), #warning: create file with small name # '--before_next', 'restore_config', # '--add_to_filename', f"_{t_interval_start:%y%m%d_%H%M}_{length}", '--filename_fun', f'lambda tbl: "{pattern_path_new.name}"', '--add_custom_list', 'USEtime', # nAveragePrefer', '--add_custom_expressions_list', txt_time_range, # + """ # ", 5" # """, '--b_update_existed', 'True', '--export_pages_int_list', '1, 2', # 0 for all '6, 7, 8', #'1, 2, 3' # '--export_dpi_int', '200', '--export_format', 'emf', '--b_interact', '0', '--b_images_only', f'{b_images_only}', '--return', '<embedded_object>', # reuse to not bloat memory ], veusze=cfg_vp['veusze'])
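
# --- Illustrative sketch (numpy only; not the project's h5inclinometer_coef API) ----------------
# The coefficient-rotation block after step 1 above right-multiplies Ag and Ah by a 180° rotation
# about the x axis (coefs['Ag'] @ rot_matrix_x(cos(pi), sin(pi))). The effect is to flip the sign
# of the 2nd and 3rd columns of the matrix, i.e. to fold the known 180° probe rotation into the
# calibration, as the comments there describe. A small self-contained check with a hypothetical Ag:
import numpy as np


def rot_x(angle_rad):
    """Rotation matrix about the x axis."""
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    return np.array([[1, 0, 0],
                     [0, c, -s],
                     [0, s, c]])


Ag_example = np.diag([0.001, 0.002, 0.003])     # hypothetical accelerometer calibration matrix
Ag_rotated = Ag_example @ rot_x(np.pi)
assert np.allclose(Ag_rotated[:, 0], Ag_example[:, 0])      # x column unchanged
assert np.allclose(Ag_rotated[:, 1:], -Ag_example[:, 1:])   # y, z columns change sign
# -------------------------------------------------------------------------------------------------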
prefix = 'incl'  # 'incl' or 'w': table name prefix in db and in raw files (when searching, raw file names are uppercased anyway): 'incl' - inclinometer, 'w' - wavegauge
# dir_incl = '' if 'inclinometer' in str(path_cruise) else 'inclinometer'
# if not db_name:  # then name by cruise dir:
#     db_name = re.match('(^[\d_]*).*', (path_cruise.parent if dir_incl else path_cruise).name
#                        ).group(1).strip('_') + 'incl.h5'
db_path_out = paths_db_in[0].with_name('many2one_out.h5')

# Run steps (inclusive):
st.start = 2  # 1
st.end = 2
m_TimeStart_csv = pd.Timestamp('2019-07-08T00:00:00Z')

# Calculate velocity and average
if st(2):
    # if aggregate_period_s is None then do not average and write to *_proc_noAvg.h5, else load from that h5 and write to *_proc.h5
    for aggregate_period_s in [720]:  # [None, 2, 300, 600, 3600 if 'w' in prefix else 7200]
        args = [
            Path(incl_h5clc.__file__).with_name(f'incl_h5clc_many2one.yaml'),  # if no such file, all settings are here
            '--db_path', '|'.join(str(p) for p in paths_db_in),
            '--tables_list', ','.join(f'incl{i:0>2}' for i in probes),
            # incl.*| ! 'incl.*|w\d*' inclinometers or wavegauges w\d\d # 'incl09',
            '--aggregate_period',
def main(new_arg=None, **kwargs): """ :param new_arg: list of strings, command line arguments :kwargs: dicts of dictcts (for each ini section): specified values overwrites ini values """ # global l cfg = cfg_from_args(my_argparser(), new_arg, **kwargs) if not cfg['program']: return # usually error of unrecognized arguments displayed cfg['in']['db_coefs'] = Path(cfg['in']['db_coefs']) for path_field in ['db_coefs', 'path_cruise']: if not cfg['in'][path_field].is_absolute(): cfg['in'][path_field] = ( cfg['in']['cfgFile'].parent / cfg['in'][path_field] ).resolve().absolute() # cfg['in']['cfgFile'].parent / def constant_factory(val): def default_val(): return val return default_val for lim in ('min_date', 'max_date'): # convert keys to int because they must be comparable to probes_int_list (for command line arguments keys are allways strings, in yaml you can set string or int) _ = {int(k): v for k, v in cfg['filter'][lim].items()} cfg['filter'][lim] = defaultdict(constant_factory(_.get(0)), _) l = init_logging(logging, None, None, 'INFO') #l = init_logging(logging, None, cfg['program']['log'], cfg['program']['verbose']) if True: # False. Experimental speedup but takes memory from dask.cache import Cache cache = Cache(2e9) # Leverage two gigabytes of memory cache.register() # Turn cache on globally #if __debug__: # # because there was errors on debug when default scheduler used # cfg['program']['dask_scheduler'] = 'synchronous' if cfg['program']['dask_scheduler']: if cfg['program']['dask_scheduler'] == 'distributed': from dask.distributed import Client # cluster = dask.distributed.LocalCluster(n_workers=2, threads_per_worker=1, memory_limit="5.5Gb") client = Client(processes=False) # navigate to http://localhost:8787/status to see the diagnostic dashboard if you have Bokeh installed # processes=False: avoide inter-worker communication for computations releases the GIL (numpy, da.array) # without is error else: if cfg['program']['dask_scheduler'] == 'synchronous': l.warning('using "synchronous" scheduler for debugging') import dask dask.config.set(scheduler=cfg['program']['dask_scheduler']) # Run steps : st.start = cfg['program']['step_start'] st.end = cfg['program']['step_end'] st.go = True if not cfg['out'][ 'db_name']: # set name by 'path_cruise' name or parent if it has digits at start. priority for name is "*inclinometer*" for p in (lambda p: [p, p.parent])(cfg['in']['path_cruise']): m = re.match('(^[\d_]*).*', p.name) if m: break cfg['out']['db_name'] = f"{m.group(1).strip('_')}incl.h5" dir_incl = next((d for d in cfg['in']['path_cruise'].glob('*inclinometer*') if d.is_dir()), cfg['in']['path_cruise']) db_path = dir_incl / '_raw' / cfg['out']['db_name'] # --------------------------------------------------------------------------------------------- # def fs(probe, name): # if 'w' in name.lower(): # Baranov's wavegauge electronic # return 10 # 5 # return 5 # if probe < 20 or probe in [23, 29, 30, 32, 33]: # 30 [4, 11, 5, 12] + [1, 7, 13, 30] # return 5 # if probe in [21, 25, 26] + list(range(28, 35)): # return 8.2 # return 4.8 def datetime64_str(time_str: Optional[str] = None) -> np.ndarray: """ Reformat time_str to ISO 8601 or to 'NaT'. Used here for input in funcs that converts str to numpy.datetime64 :param time_str: May be 'NaT' :return: ndarray of strings (tested for 1 element only) formatted by numpy. 
""" return np.datetime_as_string(np.datetime64(time_str, 's')) probes = cfg['in']['probes'] or range( 1, 41) # sets default range, specify your values before line --- raw_root, probe_is_incl = re.subn('INCL_?', 'INKL_', cfg['in']['probes_prefix'].upper()) # some parameters that depends of probe type (indicated by probes_prefix) p_type = defaultdict( # baranov's format constant_factory({ 'correct_fun': partial(correct_txt, mod_file_name=mod_incl_name, sub_str_list=[ b'^\r?(?P<use>20\d{2}(\t\d{1,2}){5}(\t\d{5}){8}).*', b'^.+' ]), 'fs': 10, 'format': 'Baranov', }), { (lambda x: x if x.startswith('incl') else 'incl')(cfg['in']['probes_prefix']): { 'correct_fun': partial( correct_txt, mod_file_name=mod_incl_name, sub_str_list=[ b'^(?P<use>20\d{2}(,\d{1,2}){5}(,\-?\d{1,6}){6}(,\d{1,2}\.\d{2})(,\-?\d{1,3}\.\d{2})).*', b'^.+' ]), 'fs': 5, 'format': 'Kondrashov', }, 'voln': { 'correct_fun': partial( correct_txt, mod_file_name=mod_incl_name, sub_str_list=[ b'^(?P<use>20\d{2}(,\d{1,2}){5}(,\-?\d{1,8})(,\-?\d{1,2}\.\d{2}){2}).*', b'^.+' ]), 'fs': 5, #'tbl_prefix': 'w', 'format': 'Kondrashov', } }) if st(1, 'Save inclinometer or wavegage data from ASCII to HDF5'): # Note: Can not find additional not corrected files for same probe if already have any corrected in search path (move them out if need) i_proc_probe = 0 # counter of processed probes i_proc_file = 0 # counter of processed files # patten to identify only _probe_'s raw data files that need to correct '*INKL*{:0>2}*.[tT][xX][tT]': raw_parent = dir_incl / '_raw' # raw_parent /= if cfg['in']['raw_subdir'] is None: cfg['in']['raw_subdir'] = '' dir_out = raw_parent / re.sub(r'[.\\/ *?]', '_', cfg['in']['raw_subdir']) # sub replaces multilevel subdirs to 1 level that correct_fun() can only make def dt_from_utc_2000(probe): """ Correct time of probes started without time setting. Raw date must start from 2000-01-01T00:00""" return ( datetime(year=2000, month=1, day=1) - cfg['in']['time_start_utc'][probe] ) if cfg['in']['time_start_utc'].get(probe) else timedelta(0) # convert cfg['in']['dt_from_utc'] keys to int cfg['in']['dt_from_utc'] = { int(p): v for p, v in cfg['in']['dt_from_utc'].items() } # convert cfg['in']['t_start_utc'] to cfg['in']['dt_from_utc'] and keys to int cfg['in']['dt_from_utc'].update( # overwriting the 'time_start_utc' where already exist {int(p): dt_from_utc_2000(p) for p, v in cfg['in']['time_start_utc'].items()} ) # make cfg['in']['dt_from_utc'][0] be default value cfg['in']['dt_from_utc'] = defaultdict( constant_factory(cfg['in']['dt_from_utc'].pop(0, timedelta(0))), cfg['in']['dt_from_utc']) for probe in probes: raw_found = [] raw_pattern_file = str( Path(glob.escape(cfg['in']['raw_subdir'])) / cfg['in']['raw_pattern'].format(prefix=raw_root, number=probe)) correct_fun = p_type[cfg['in']['probes_prefix']]['correct_fun'] # if not archive: if (not re.match(r'.*(\.zip|\.rar)$', cfg['in']['raw_subdir'], re.IGNORECASE)) and raw_parent.is_dir(): raw_found = list(raw_parent.glob(raw_pattern_file)) if not raw_found: # Check if already have corrected files for probe generated by correct_txt(). 
If so then just use them raw_found = list( dir_out.glob( f"{cfg['in']['probes_prefix']}{probe:0>2}.txt")) if raw_found: print('corrected csv file', [r.name for r in raw_found], 'found') correct_fun = lambda x, dir_out: x elif not cfg['in']['raw_subdir']: continue for file_in in (raw_found or open_csv_or_archive_of_them( raw_parent, binary_mode=False, pattern=raw_pattern_file)): file_in = correct_fun(file_in, dir_out=dir_out) if not file_in: continue tbl = file_in.stem # f"{cfg['in']['probes_prefix']}{probe:0>2}" # tbl = re.sub('^((?P<i>inkl)|w)_0', lambda m: 'incl' if m.group('i') else 'w', # correct name # re.sub('^[\d_]*|\*', '', file_in.stem).lower()), # remove date-prefix if in name csv2h5( [ str( Path(__file__).parent / 'ini' / f"csv_{'inclin' if probe_is_incl else 'wavegage'}_{p_type[cfg['in']['probes_prefix']]['format']}.ini" ), '--path', str(file_in), '--blocksize_int', '50_000_000', # 50Mbt '--table', tbl, '--db_path', str(db_path), # '--log', str(scripts_path / 'log/csv2h5_inclin_Kondrashov.log'), # '--b_raise_on_err', '0', # ? '--b_interact', '0', '--fs_float', str(p_type[cfg['in']['probes_prefix']] ['fs']), #f'{fs(probe, file_in.stem)}', '--dt_from_utc_seconds', str(cfg['in']['dt_from_utc'][probe].total_seconds()), '--b_del_temp_db', '1', ] + (['--csv_specific_param_dict', 'invert_magnitometr: True'] if probe_is_incl else []), **{ 'filter': { 'min_date': cfg['filter']['min_date'].get( probe, np.datetime64(0, 'ns')), 'max_date': cfg['filter']['max_date'].get( probe, np.datetime64('now', 'ns') ), # simple 'now' works in sinchronious mode } }) # Get coefs: l.info( f"Adding coefficients to {db_path}/{tbl} from {cfg['in']['db_coefs']}" ) try: h5copy_coef(cfg['in']['db_coefs'], db_path, tbl) except KeyError as e: # Unable to open object (component not found) l.warning( 'No coefs to copy?' ) # write some dummy coefficients to can load Veusz patterns: h5copy_coef(None, db_path, tbl, dict_matrices=dict_matrices_for_h5(tbl=tbl)) except OSError as e: l.warning( 'Not found DB with coefs?' ) # write some dummy coefficients to can load Veusz patterns: h5copy_coef(None, db_path, tbl, dict_matrices=dict_matrices_for_h5(tbl=tbl)) i_proc_file += 1 else: print('no', raw_pattern_file, end=', ') i_proc_probe += 1 print('Ok:', i_proc_probe, 'probes,', i_proc_file, 'files processed.') if st(2, 'Calculate physical parameters and average'): kwarg = { 'in': { 'min_date': cfg['filter']['min_date'][0], 'max_date': cfg['filter']['max_date'][0], 'time_range_zeroing': cfg['in']['time_range_zeroing'] }, 'proc': {} } # if aggregate_period_s is None then not average and write to *_proc_noAvg.h5 else loading from that h5 and writing to _proc.h5 if not cfg['out']['aggregate_period_s']: cfg['out']['aggregate_period_s'] = [ None, 2, 600, 7200 if probe_is_incl else 3600 ] if cfg['in']['azimuth_add']: if 'Lat' in cfg['in']['azimuth_add']: # add magnetic declination,° for used coordinates # todo: get time kwarg['proc']['azimuth_add'] = mag_dec( cfg['in']['azimuth_add']['Lat'], cfg['in']['azimuth_add']['Lon'], datetime(2020, 9, 10), depth=-1) else: kwarg['proc']['azimuth_add'] = 0 if 'constant' in cfg['in']['azimuth_add']: # and add constant. 
For example, subtruct declination at the calibration place if it was applied kwarg['proc']['azimuth_add'] += cfg['in']['azimuth_add'][ 'constant'] # add -6.656 to account for calibration in Kaliningrad (mag deg = 6.656°) for aggregate_period_s in cfg['out']['aggregate_period_s']: if aggregate_period_s is None: db_path_in = db_path db_path_out = dir_incl / f'{db_path.stem}_proc_noAvg.h5' else: db_path_in = dir_incl / f'{db_path.stem}_proc_noAvg.h5' db_path_out = dir_incl / f'{db_path.stem}_proc.h5' # or separately: '_proc{aggregate_period_s}.h5' # 'incl.*|w\d*' inclinometers or wavegauges w\d\d # 'incl09': tables_list_regex = f"{cfg['in']['probes_prefix'].replace('voln', 'w')}.*" if cfg['in']['probes']: tables_list_regex += "(?:{})".format('|'.join( '{:0>2}'.format(p) for p in cfg['in']['probes'])) args = [ '../../empty.yml', # all settings are here, so to not print 'using default configuration' we use some existed empty file '--db_path', str(db_path_in), '--tables_list', tables_list_regex, '--aggregate_period', f'{aggregate_period_s}S' if aggregate_period_s else '', '--out.db_path', str(db_path_out), '--table', f'V_incl_bin{aggregate_period_s}' if aggregate_period_s else 'V_incl', '--verbose', 'INFO', #'DEBUG' get many numba messages '--b_del_temp_db', '1', # '--calc_version', 'polynom(force)', # depreshiated # '--chunksize', '20000', # '--not_joined_h5_path', f'{db_path.stem}_proc.h5', ] if aggregate_period_s is None: # proc. parameters (if we have saved proc. data then when aggregating we are not processing) # Note: for Baranov's prog 4096 is not suited: args += ([ '--max_dict', 'M[xyz]:4096', # '--time_range_zeroing_dict', "incl19: '2019-11-10T13:00:00', '2019-11-10T14:00:00'\n," # not works - use kwarg # '--time_range_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00' '--split_period', '1D' ] if probe_is_incl else [ '--bad_p_at_bursts_starts_peroiod', '1H', ]) # csv splitted by 1day (default for no avg) else csv is monolith if aggregate_period_s not in cfg['out'][ 'aggregate_period_s_not_to_text']: # , 300, 600]: args += ['--text_path', str(dir_incl / 'text_output')] # If need all data to be combined one after one: # set_field_if_no(kwarg, 'in', {}) # kwarg['in'].update({ # # 'tables': [f'incl{i:0>2}' for i in min_date.keys() if i!=0], # 'dates_min': min_date.values(), # in table list order # 'dates_max': max_date.values(), # # }) # set_field_if_no(kwarg, 'out', {}) # kwarg['out'].update({'b_all_to_one_col': 'True'}) incl_h5clc.main(args, **kwarg) if st(3, 'Calculate spectrograms'): # Can be done at any time after step 1 min_Pressure = 7 # add dict dates_min like {probe: parameter} of incl_clc to can specify param to each probe def raise_ni(): raise NotImplementedError( 'Can not proc probes having different fs in one run: you need to do it separately' ) args = [ Path(incl_h5clc.__file__).with_name( f'incl_h5spectrum{db_path.stem}.yaml'), # if no such file all settings are here '--db_path', str(dir_incl / f'{db_path.stem}_proc_noAvg.h5'), '--tables_list', f"{cfg['in']['probes_prefix']}.*", # inclinometers or wavegauges w\d\d ## 'w02', 'incl.*', # '--aggregate_period', f'{aggregate_period_s}S' if aggregate_period_s else '', '--min_date', datetime64_str(cfg['filter']['min_date'][0]), '--max_date', datetime64_str(cfg['filter']['max_date'] [0]), # '2019-09-09T16:31:00', #17:00:00 '--min_Pressure', f'{min_Pressure}', # '--max_dict', 'M[xyz]:4096', # use if db_path is not ends with _proc_noAvg.h5 i.e. 
need calc velocity '--out.db_path', f"{db_path.stem.replace('incl', cfg['in']['probes_prefix'])}_proc_psd.h5", # '--table', f'psd{aggregate_period_s}' if aggregate_period_s else 'psd', '--fs_float', str(p_type[cfg['in']['probes_prefix']] ['fs']), # f"{fs(probes[0], cfg['in']['probes_prefix'])}", # (lambda x: x == x[0])(np.vectorize(fs)(probes, prefix))).all() else raise_ni() # # '--time_range_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00' # '--verbose', 'DEBUG', # '--chunksize', '20000', '--b_interact', '0', ] if probe_is_incl: args += [ '--split_period', '2H', '--fmin', '0.0004', #0.0004 '--fmax', '1.05' ] else: args += [ '--split_period', '1H', '--dt_interval_minutes', '15', # set this if burst mode to the burst interval '--fmin', '0.0001', '--fmax', '4', #'--min_Pressure', '-1e15', # to not load NaNs ] incl_h5spectrum.main(args) if st(4, 'Draw in Veusz'): pattern_path = dir_incl / r'processed_h5,vsz/201202-210326incl_proc#28.vsz' # r'\201202_1445incl_proc#03_pattern.vsz' #' # db_path.parent / r'vsz_5min\191119_0000_5m_incl19.vsz' # r'vsz_5min\191126_0000_5m_w02.vsz' b_images_only = False # importing in vsz index slices replacing: pattern_str_slice_old = None # Length of not adjacent intervals, s (set None to not allow) # pandas interval in string or tuple representation '1D' of period between intervals and interval to draw period_str = '0s' # '1D' # dt dt_str = '0s' # '5m' file_intervals = None period = to_offset(period_str).delta dt = to_offset(dt_str).delta # timedelta(0) # 60 * 5 if file_intervals and period and dt: # Load starts and assign ends t_intervals_start = pd.read_csv( cfg['in']['path_cruise'] / r'vsz+h5_proc\intervals_selected.txt', converters={ 'time_start': lambda x: np.datetime64(x, 'ns') }, index_col=0).index edges = (pd.DatetimeIndex(t_intervals_start), pd.DatetimeIndex(t_intervals_start + dt_custom_s) ) # np.zeros_like() elif period and dt: # Generate periodic intervals t_interval_start, t_intervals_end = intervals_from_period( datetime_range=np.array( [ cfg['filter']['min_date']['0'], cfg['filter']['max_date']['0'] ], # ['2018-08-11T18:00:00', '2018-09-06T00:00:00'], # ['2019-02-11T13:05:00', '2019-03-07T11:30:00'], # ['2018-11-16T15:19', '2018-12-14T14:35'], # ['2018-10-22T12:30', '2018-10-27T06:30:00'], 'datetime64[s]'), period=period) edges = (pd.DatetimeIndex([t_interval_start ]).append(t_intervals_end[:-1]), pd.DatetimeIndex(t_intervals_end)) else: # [min, max] edges for each probe edges_dict = { pr: [cfg['filter']['min_date'][pr], cfg['filter']['max_date'][pr]] for pr in probes } cfg_vp = {'veusze': None} for i, probe in enumerate(probes): # cfg_vp = {'veusze': None} if edges_dict: # custom edges for each probe edges = [pd.DatetimeIndex([t]) for t in edges_dict[probe]] # substr in file to rerplace probe_name_in_pattern (see below). 
probe_name = f"_{cfg['in']['probes_prefix'].replace('incl', 'i')}{probe:02}" tbl = None # f"/{cfg['in']['probes_prefix']}{probe:02}" # to check probe data exist in db else will not check l.info('Draw %s in Veusz: %d intervals...', probe_name, edges[0].size) # for i_interval, (t_interval_start, t_interval_end) in enumerate(zip(pd.DatetimeIndex([t_interval_start]).append(t_intervals_end[:-1]), t_intervals_end), start=1): for i_interval, (t_interval_start, t_interval_end) in enumerate(zip(*edges), start=1): # if i_interval < 23: #<= 0: # TEMPORARY Skip this number of intervals # continue if period and period != dt: t_interval_start = t_interval_end - pd.Timedelta( dt_custom_s, 's') if tbl: try: # skipping absent probes start_end = h5q_interval2coord( db_path=str(db_path), table=tbl, t_interval=(t_interval_start, t_interval_end)) if not len(start_end): break # no data except KeyError: break # device name not in specified range, go to next name pattern_path_new = pattern_path.with_name(''.join([ f'{t_interval_start:%y%m%d_%H%M}', f'_{dt_str}' if dt else '', f'{probe_name}.vsz' ])) # Modify pattern file if not b_images_only: pattern_type, pattern_number = re.match( r'.*(incl|w)_proc?#?(\d*).*', pattern_path.name).groups() probe_name_in_pattern = f"_{pattern_type.replace('incl', 'i')}{pattern_number}" def f_replace(line): """ Replace in file 1. probe name 2. slice """ # if i_interval == 1: line, ok = re.subn(probe_name_in_pattern, probe_name, line) if ok and pattern_str_slice_old: # can be only in same line str_slice = '(({:d}, {:d}, None),)'.format( *(start_end + np.int32([-1, 1]))) # bytes(, 'ascii') line = re.sub(pattern_str_slice_old, str_slice, line) return line if not rep_in_file(pattern_path, pattern_path_new, f_replace=f_replace, binary_mode=False): l.warning('Veusz pattern not changed!' ) # may be ok if we need draw pattern # break elif cfg_vp['veusze']: cfg_vp['veusze'].Load(str(pattern_path_new)) elif cfg_vp['veusze']: cfg_vp['veusze'].Load(str(pattern_path_new)) txt_time_range = \ """ "[['{:%Y-%m-%dT%H:%M}', '{:%Y-%m-%dT%H:%M}']]" \ """.format(t_interval_start, t_interval_end) print(f'{i_interval}. 
{txt_time_range}', end=' ') cfg_vp = veuszPropagate.main( [ Path(veuszPropagate.__file__).parent.with_name( 'veuszPropagate.ini'), # '--data_yield_prefix', '-', # '--path', str(db_path), # if custom loading from db and some source is required '--tables_list', '', # switches to search vsz-files only # f'/{probe_name}', # 181022inclinometers/ \d* '--pattern_path', str(pattern_path_new), # fr'd:\workData\BalticSea\190801inclinometer_Schuka\{probe_name}_190807_1D.vsz', # str(dir_incl / f'{probe_name}_190211.vsz'), #warning: create file with small name # '--before_next', 'restore_config', # '--add_to_filename', f"_{t_interval_start:%y%m%d_%H%M}_{dt}", '--filename_fun', f'lambda tbl: "{pattern_path_new.name}"', '--add_custom_list', f'USEtime__', # f'USEtime{probe_name}', nAveragePrefer', '--add_custom_expressions_list', txt_time_range, # + """ # ", 5" # """, '--b_update_existed', 'True', '--export_pages_int_list', '0', # 0 for all '6, 7, 8', #'1, 2, 3' # '--export_dpi_int', '200', '--export_format', 'jpg', #'emf', '--b_interact', '0', '--b_images_only', f'{b_images_only}', '--return', '<embedded_object>', # reuse to not bloat memory '--b_execute_vsz', 'True', '--before_next', 'Close()' # Close() need if b_execute_vsz many files ], veusze=cfg_vp['veusze']) if st(40, f'Draw in Veusz by loader-drawer.vsz method'): # save all vsz files that uses separate code from os import chdir as os_chdir dt_s = 300 cfg['in'][ 'pattern_path'] = db_path.parent / f'vsz_{dt_s:d}s' / '~pattern~.vsz' time_starts = pd.read_csv( db_path.parent / r'processed_h5,vsz' / 'intervals_selected.txt', index_col=0, parse_dates=True, date_parser=lambda x: pd.to_datetime(x, format='%Y-%m-%dT%H:%M:%S' )).index pattern_code = cfg['in']['pattern_path'].read_bytes( ) # encoding='utf-8' path_vsz_all = [] for i, probe in enumerate(probes): probe_name = f"{cfg['in']['probes_prefix']}{probe:02}" # table name in db l.info('Draw %s in Veusz: %d intervals...', probe_name, time_starts.size) for i_interval, time_start in enumerate(time_starts, start=1): path_vsz = cfg['in']['pattern_path'].with_name( f"{time_start:%y%m%d_%H%M}_{probe_name.replace('incl','i')}.vsz" ) # copy file to path_vsz path_vsz.write_bytes(pattern_code) # replaces 1st row path_vsz_all.append(path_vsz) os_chdir(cfg['in']['pattern_path'].parent) veuszPropagate.main( [ 'ini/veuszPropagate.ini', '--path', str(cfg['in']['pattern_path'].with_name( '??????_????_*.vsz')), # db_path), '--pattern_path', f"{cfg['in']['pattern_path']}_", # here used to auto get export dir only. 
may not be _not existed file path_ if ['out']['paths'] is provided # '--table_log', f'/{device}/logRuns', # '--add_custom_list', f'{device_veusz_prefix}USE_time_search_runs', # 'i3_USE_timeRange', # '--add_custom_expressions', # """'[["{log_row[Index]:%Y-%m-%dT%H:%M:%S}", "{log_row[DateEnd]:%Y-%m-%dT%H:%M:%S}"]]'""", # '--export_pages_int_list', '1', #'--b_images_only', 'True' '--b_interact', '0', '--b_update_existed', 'True', # todo: delete_overlapped '--b_images_only', 'True', '--load_timeout_s_float', str(cfg['program']['load_timeout_s']) # '--min_time', '2020-07-08T03:35:00', ], **{'out': { 'paths': path_vsz_all }}) if st(50, 'Export from existed Veusz files in dir'): pattern_parent = db_path.parent # r'vsz_5min\191126_0000_5m_w02.vsz'' pattern_path = str(pattern_parent / r'processed_h5,vsz' / '??????incl_proc#[1-9][0-9].vsz') # [0-2,6-9] veuszPropagate.main([ 'ini/veuszPropagate.ini', '--path', pattern_path, '--pattern_path', pattern_path, # '--export_pages_int_list', '1', #'--b_images_only', 'True' '--b_interact', '0', '--b_update_existed', 'True', # todo: delete_overlapped '--b_images_only', 'True', '--load_timeout_s_float', str(cfg['program']['load_timeout_s']), '--b_execute_vsz', 'True', '--before_next', 'Close()' # Close() need if b_execute_vsz many files ])
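
# --- Illustrative sketch (pandas only; not the project's intervals_from_period) -----------------
# The Veusz drawing steps above split [min_date, max_date] into periodic intervals (e.g. one per
# day) and, when a shorter 'length' is given, draw only the tail of each period. A minimal,
# self-contained example of building such interval edges and the USEtime strings written into the
# vsz patterns (dates are hypothetical):
import pandas as pd

t_min, t_max = pd.Timestamp('2019-11-19'), pd.Timestamp('2019-11-22')
t_ends = pd.date_range(t_min, t_max, freq='1D')[1:]   # period ends, one per day
t_starts = t_ends - pd.Timedelta('5min')              # draw only the last 5 minutes of each period
for t_start, t_end in zip(t_starts, t_ends):
    print(f"[['{t_start:%Y-%m-%dT%H:%M}', '{t_end:%Y-%m-%dT%H:%M}']]")
# -------------------------------------------------------------------------------------------------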
from to_pandas_hdf5.gpx2h5 import main as gpx2h5
from to_pandas_hdf5.CTD_calc import main as CTD_calc
from h5toGpx import main as h5toGpx
from grid2d_vsz import main as grid2d_vsz

path_cruise = Path(r'd:\workData\BalticSea\200819_AI56')
path_db = path_cruise / path_cruise.with_suffix('.h5').name  # same name as dir

st.go = True  # False #
st.start = 70  # default: 1, used: 5 30 70 80
st.end = 110  # 60 80 120
# Stop before steps that need manual preparations (70), i.e. set end < 70 at first
# Gridding (last step) needs debugging if interactive filtering is needed
# ---------------------------------------------------------------------------------------------

if st(1, 'Save gpx navigation to DB'):
    gpx2h5([
        '',
        '--db_path', str(path_db),
        '--path', str(path_cruise / r'navigation\_raw\*.gpx'),
        '--tables_list', ',navigation,',  # skip waypoints
        '--table_prefix', r'',
        # '--min_date', '2019-07-17T14:00:00',
        '--b_skip_if_up_to_date', '0',
        '--b_interact', '0',
from to_vaex_hdf5.nmea2h5 import main as nmea2h5

st.go = True  # False #
st.start = 110  # 5 30 70 80
st.end = 230  # 60 80 120

path_cruise = Path(r'd:\WorkData\BlackSea\200909_Ashamba')
path_db = path_cruise / path_cruise.with_suffix('.h5').name  # same name as dir
# Stop before steps that need manual preparations (70), i.e. set end < 70 at first
# Gridding (last step) needs debugging if interactive filtering is needed
# ---------------------------------------------------------------------------------------------
min_coord = 'Lat:10, Lon:10'
max_coord = 'Lat:50, Lon:50'

if st(1, 'Save gpx navigation to DB'):
    # Save navigation to DB
    for folder in ('OpenCPN_my', 'MapSource_Baranov'):
        gpx2h5([
            '',
            '--db_path', str(path_db),
            '--path', str(path_cruise / 'navigation' / folder / '*.gpx'),
            '--tables_list', ',navigation,',  # skip waypoints
            '--table_prefix', r'',
            # '--b_search_in_subdirs': setting it to True would remove the need for this loop, but then overlapping data files cause problems
            # '--min_date', '2019-07-17T14:00:00',
            '--b_interact',