def get_response_from_file(inventory, resp_dir, network, station, location, channel, start_time, debug):
    """Find the response for a given network, station, location and channel.

    If `inventory` is None it is first populated from the StationXML files
    under `resp_dir` via get_response_inventory().

    Returns a (inventory, this_inventory) tuple: the (possibly newly built)
    list of inventories, and the first inventory that provides a response for
    the requested seed ID at `start_time`, or None when no match is found.

    2016-10-27 Manoch: created
    """
    # If inventory list is not populated, populate it first.
    if inventory is None:
        inventory = get_response_inventory(resp_dir, False)

    this_start_time = UTCDateTime(start_time)
    seed_id = ".".join([network, station, location, channel])
    msg_lib.info(f'Looking for seed ID: {seed_id}')

    this_inventory = None
    for inv in inventory:
        try:
            response = inv.get_response(seed_id, this_start_time)
            if debug:
                msg_lib.info(f'Response: {response}')
            this_inventory = inv
            break
        except Exception:
            # This inventory does not cover the requested channel/time window;
            # keep scanning the remaining inventories.
            continue
    return inventory, this_inventory
def read_url(target_url, verbose=False):
    """Fetch the given URL and return its body decoded as text."""
    if verbose:
        msg_lib.info(f'Opening URL: {target_url}')
    with urlopen(target_url) as response:
        body = response.read()
    return body.decode()
def getParam(args, key, msgLib, value=None):
    """Get a run argument for the given key.

    Returns args[key] if present (logging it through msgLib); otherwise the
    supplied default `value`. When the key is absent and no default is given,
    an error is reported and the script exits.
    """
    # Membership test directly on the dict — no need for .keys().
    if key in args:
        msgLib.info(': '.join([key, args[key]]))
        return args[key]
    elif value is not None:
        return value
    else:
        msgLib.error("missing parameter " + key, 1)
        usage()
        sys.exit()
def get_param(these_args, this_key, value=None):
    """Get a run argument for a given key.

    Returns these_args[this_key] if present (logging it through the global
    msgLib); otherwise the supplied default `value`. When the key is absent
    and no default is provided, an error is reported and the script exits.
    """
    # Membership test directly on the dict — no need for .keys().
    if this_key in these_args:
        # f-string instead of str.format, consistent with the rest of the file.
        msgLib.info(f'{this_key}: {these_args[this_key]}')
        return these_args[this_key]
    elif value is not None:
        return value
    else:
        msgLib.error(f'Missing parameter {this_key}', 1)
        usage()
        sys.exit()
def get_response_inventory(resp_dir, debug=False):
    """Build a list of response inventories from StationXML files under resp_dir.

    Every regular file in resp_dir is validated and read with ObsPy's
    read_inventory. Files that raise are counted as rejected (and reported
    when debug is True).

    Returns the list of ObsPy Inventory objects that were read successfully.

    2016-10-27 Manoch: created
    """
    t0 = time()
    accept_count = 0
    reject_count = 0
    msg_lib.info('reading response files')
    file_list = [f for f in listdir(resp_dir) if isfile(join(resp_dir, f))]
    inventory = list()
    for resp_file in file_list:
        try:
            validation = validate_StationXML(os.path.join(resp_dir, resp_file))
            if not validation[0]:
                # NOTE(review): despite the "Skipped" message, the file is still
                # read below; add a `continue` here if invalid files should
                # really be skipped — left as-is to preserve behavior.
                msg_lib.error(
                    f'Skipped invalid response file {os.path.join(resp_dir, resp_file)} {validation[1]}',
                    4)
            inv = read_inventory(os.path.join(resp_dir, resp_file))
            accept_count += 1
            inventory.append(inv)
        except TypeError as e:
            if debug:
                # Fixed: Python 3 exceptions have no .message attribute;
                # interpolate the exception itself instead.
                msg_lib.error(
                    f'Skipped invalid response file {os.path.join(resp_dir, resp_file)} {e}',
                    4)
            reject_count += 1
        except Exception as ex:
            if debug:
                msg_lib.error(
                    f'Skipped, could not read {os.path.join(resp_dir, resp_file)} {ex}',
                    4)
            reject_count += 1
    t1 = utils_lib.time_it("Response Inventory", t0)
    msg_lib.info(
        f'response inventory:{accept_count} valid and {reject_count} rejected')
    return inventory
def qc_3c_stream(stream, segment_length, window, sorted_channel_list, channel_groups, verbose):
    """
    qc_3c_stream performs a QC on a 3-C stream by making sure all channels
    are present, traces are the same length and have same start and end times.
    Mostly needed for polarization analysis.

    The output is an array of trace record numbers in the stream that passed
    the QC.

    HISTORY:
       2014-04-21 Manoch: created
    """
    sender = 'qc_3c_stream'
    # Parse the textual representation of the stream: one line per trace.
    traces = str(stream)
    traces = traces.split("\n")
    if verbose:
        msg_lib.info(f'{sender}, there are total of {len(traces)} traces.')

    stream_list = list()
    # The first line is title.
    for trace_index, trace in enumerate(traces):
        if trace_index == 0:
            continue
        # Tag each trace line with its record index in the stream.
        stream_list.append(f'{trace}|{trace_index}')

    # Sort to make sure related records are one after another.
    # NOTE(review): `streams` is never used below — the loop iterates the
    # unsorted `stream_list`, so grouping relies on the stream's native order.
    # Confirm whether the loop should iterate `streams` instead.
    streams = sorted(stream_list)

    # extract the list, one record (line) at a time and group them
    qc_record_list = list()
    previous_group_name = ""
    group_count = -1
    group_channels = list()
    group_records = list()
    group_names = list()
    station_info_list = list()
    time_info_list = list()
    channel_info_list = list()
    record_info_list = list()
    for line_index, line in enumerate(stream_list):
        # Reset the list for each record (line).
        this_station_info_list = list()
        this_time_info_list = list()
        this_channel_info_list = list()
        this_record_info_list = list()
        """
        RECORD:
        NM.SIUC..BHE | 2009-11-01T11:00:00.019537Z - 2009-11-01T11:59:59.994537Z | 40.0 Hz, 144000 samples|1
        |            |                                                           |                        |
        sta_info     time_info                                                   chan_info                rec_info

        from each record extract parts
        """
        sta_info, time_info, chan_info, rec_info = line.split("|")
        """
                                  0   1   2    3
        this_station_info_list = [NET,STA,LOC,CHAN]
        """
        this_station_info_list = sta_info.strip().split(".")
        # Replace blank locations with "--".
        this_station_info_list[2] = sta_lib.get_location(
            this_station_info_list[2])
        """
                               0     1
        this_time_info_list = [START,END]
        """
        this_time_info_list.append(time_info.strip().split(" - "))
        """
                                  0        1    2       3
        this_channel_info_list = [SAMPLING,UNIT,SAMPLES,TEXT]
        """
        this_channel_info_list.append(chan_info.strip().split(" "))
        # This_record_info_list = RECORD.
        this_record_info_list.append(int(rec_info.strip()))
        # Name each record as a channel group (do not include channel).
        this_group_name = ".".join(
            this_station_info_list[ii] for ii in range(len(this_station_info_list) - 1))
        # Starting the first group, start saving info.
        if this_group_name != previous_group_name:
            group_count += 1
            if verbose:
                msg_lib.info(
                    f'{sender}, started group {group_count}: {this_group_name}'
                )
            group_names.append(this_group_name)
            group_channels.append(list())
            group_records.append(list())
            previous_group_name = this_group_name
        # Save the channel names.
        group_channels[group_count].append(this_station_info_list[-1])
        group_records[group_count].append(line_index)
        #
        # note: the following arrays are not grouped, hence extend and not append
        #
        time_info_list.extend(this_time_info_list)
        channel_info_list.extend(this_channel_info_list)
        station_info_list.extend([this_station_info_list])
        record_info_list.extend(this_record_info_list)

    if verbose:
        msg_lib.info(f'{sender}, found {len(group_records)} record groups.')

    # QC each group
    for rec_index, rec in enumerate(group_records):
        # All group elements are in, start the QC.
        qc_passed = True
        if verbose:
            msg_lib.info(
                f'{sender}, QC for record group {group_names[rec_index]}')
        # Create a sorted list of unique channels.
        channel_list = sorted(set(group_channels[rec_index]))
        if verbose:
            msg_lib.info(f'{sender}, channel list: {channel_list}')
        # Missing Channels?
        #   - based on missing records.
        if len(group_records[rec_index]) < 3:
            msg_lib.info(
                f'{sender}, missing channels records, received {len(group_records[rec_index])}'
            )
            qc_passed = False
        else:
            #   - based on channels missing from channel list.
            if channel_list not in sorted_channel_list:
                msg_lib.info(
                    f'{sender}, missing channels records from {group_names[rec_index]} got '
                    f'{channel_list} while expecting {sorted_channel_list}')
                qc_passed = False
            #   - channel list is valid
            else:
                msg_lib.info(f'{sender}, channel list complete {channel_list}')
                """
                Gaps?
                This is a simple rejection based on gaps. A better choice will
                be to take segments and process those with sufficient length
                but with 3 channels involved, this will be too complicated
                -- manoch 2014-04-18
                """
                if len(group_records[rec_index]) > 3:
                    msg_lib.info(f'{sender}, gaps in {group_names[rec_index]}')
                    qc_passed = False
                else:
                    msg_lib.info(
                        f'{sender}, no gaps in {group_names[rec_index]}')
                    # Check for sampling rates.
                    # Exactly three records here (len is neither <3 nor >3).
                    rec1, rec2, rec3 = map(int, group_records[rec_index])
                    sampling_frequency_01 = float(channel_info_list[rec1][0])
                    sampling_frequency_02 = float(channel_info_list[rec2][0])
                    sampling_frequency_03 = float(channel_info_list[rec3][0])
                    if sampling_frequency_01 != sampling_frequency_02 or sampling_frequency_01 != sampling_frequency_03:
                        msg_lib.info(
                            f'{sender}, sampling frequencies do not match! ({sampling_frequency_01}, '
                            f'{sampling_frequency_02}, {sampling_frequency_03}'
                        )
                        qc_passed = False
                    else:
                        msg_lib.info(
                            f'{sender}, sampling frequencies: [{sampling_frequency_01}, '
                            f'{sampling_frequency_02},'
                            f'{sampling_frequency_03}]')
                    # Check for mismatched start time - Note: there are exactly 3 records.
                    delay01 = np.abs(
                        UTCDateTime(time_info_list[rec1][0]) -
                        UTCDateTime(time_info_list[rec2][0]))
                    delay02 = np.abs(
                        UTCDateTime(time_info_list[rec1][0]) -
                        UTCDateTime(time_info_list[rec3][0]))
                    samplerate = 1.0 / float(channel_info_list[rec1][0])
                    # Calculate number of points needed for FFT (as a power of 2) based on the run parameters.
                    num_samp_needed_03 = 2**int(
                        math.log(
                            int((float(segment_length) / samplerate + 1) / window),
                            2))  # make sure it is power of 2
                    if delay01 == 0.0 and delay02 == 0.0:
                        msg_lib.info(f'{sender}, start times OK')
                    else:
                        if 0.0 < delay01 < samplerate:
                            msg_lib.info(
                                f'{sender}, start time difference between '
                                f'{".".join(station_info_list[rec1])} '
                                f'and {".".join(station_info_list[rec2])} is {delay01}s and is less '
                                f'than 1 sample')
                        elif delay01 > 0.0 and delay01 >= samplerate:
                            msg_lib.info(
                                f'{sender}, start time difference between '
                                f'{".".join(station_info_list[rec1])} '
                                f'and {".".join(station_info_list[rec2])} is {delay01}s and is '
                                f'one sample or more')
                            qc_passed = False
                        if 0.0 < delay02 < samplerate:
                            msg_lib.info(
                                f'{sender}, start time difference between '
                                f'{".".join(station_info_list[rec1])} '
                                f'and {".".join(station_info_list[rec3])} is {delay02}s and is less '
                                f'than 1 sample')
                        elif delay02 > 0.0 and delay02 >= samplerate:
                            msg_lib.info(
                                f'{sender}, start time difference between '
                                f'{".".join(station_info_list[rec1])} '
                                f'and {".".join(station_info_list[rec3])} is {delay02}s and is '
                                f'one sample or more')
                            # NOTE(review): unlike the delay01 branch above,
                            # this branch does not set qc_passed = False —
                            # confirm whether that asymmetry is intended.
                    # Check for insufficient number of samples.
                    if qc_passed:
                        samples_list = list()
                        for _rec in (rec1, rec2, rec3):
                            samples_list.append(
                                float(channel_info_list[_rec][2]))
                        if verbose:
                            msg_lib.info(
                                f'{sender}, samples: {samples_list}')
                        minimum_samples = np.min(samples_list)
                        if minimum_samples < num_samp_needed_03:
                            msg_lib.info(
                                f'{sender}, wanted minimum of {num_samp_needed_03} '
                                f'but got only {minimum_samples}')
                            qc_passed = False
                        else:
                            msg_lib.info(
                                f'{sender}, wanted minimum of '
                                f'{num_samp_needed_03} got {minimum_samples}, OK'
                            )
                            # mismatched end time.
                            delay01 = np.abs(
                                UTCDateTime(time_info_list[rec1][1]) -
                                UTCDateTime(time_info_list[rec2][1]))
                            delay02 = np.abs(
                                UTCDateTime(time_info_list[rec1][1]) -
                                UTCDateTime(time_info_list[rec3][1]))
                            samplerate = 1.0 / float(
                                channel_info_list[rec1][0])
                            # NOTE(review): redundant reset — qc_passed is
                            # already True on this path; end-time mismatches
                            # below are informational only.
                            qc_passed = True
                            if delay01 == 0.0 and delay02 == 0.0:
                                msg_lib.info(f'{sender}, end times OK')
                            # For information only, we know we have enough samples!
                            else:
                                if 0.0 < delay01 < samplerate:
                                    msg_lib.info(
                                        f'{sender}, end time difference between '
                                        f'{".".join(station_info_list[rec1])}'
                                        f' and {".".join(station_info_list[rec2])} is {delay01}s less '
                                        f'than 1 sample')
                                elif 0.0 < delay01 >= samplerate:
                                    msg_lib.info(
                                        f'{sender}, end time difference between '
                                        f'{".".join(station_info_list[rec1])}'
                                        f' and {".".join(station_info_list[rec2])} '
                                        f'is {delay01}s is 1 sample or more'
                                    )
                                if 0.0 < delay02 < samplerate:
                                    msg_lib.info(
                                        f'{sender}, end time difference between '
                                        f'{".".join(station_info_list[rec1])}'
                                        f' and {".".join(station_info_list[rec3])} is {delay02}s and is '
                                        f'less than 1 sample')
                                elif delay02 > 0.0 and delay02 >= samplerate:
                                    msg_lib.info(
                                        f'{sender}, end time difference between '
                                        f'{".".join(station_info_list[rec1])}'
                                        f' and {".".join(station_info_list[rec3])}'
                                        f' is {delay02}s and is 1 sample or more'
                                    )
        # End of the QC save qc_passed flag.
        if qc_passed:
            chan_group_found = False
            # qc_record_list provides index of the record for each channel_groups element.
            for chans in channel_groups:
                # found the matching channel group?
                if group_channels[rec_index][0] in chans and group_channels[rec_index][1] in \
                        chans and group_channels[rec_index][2] in chans:
                    msg_lib.info(
                        f'{sender}, output channel order should be {chans}'
                    )
                    ordered_group_records = list()
                    group_channels_list = group_channels[rec_index]
                    chan_group_found = True
                    # NOTE(review): this appends the channel's index WITHIN the
                    # group (0..2), not the stream record number — confirm this
                    # is what downstream consumers expect.
                    for chan in chans:
                        qc_record_list.append(
                            group_channels_list.index(chan))
                    break
            if not chan_group_found:
                code = msg_lib.error(
                    f'{sender}, channel_groups parameter matching the '
                    f'output channel order [{group_channels[rec_index][0]}, '
                    f'{group_channels[rec_index][1]}, {group_channels[rec_index][2]}] '
                    f'not found', 4)
                sys.exit(code)
    if verbose:
        msg_lib.info(f'{sender}, passed records: {qc_record_list}')
    return qc_record_list
def get_fedcatalog_station(req_url, request_start, request_end, chunk_length, chunk_count=1):
    """Get station list from fedcatalog service.

    Queries the fedcatalog service at req_url, splits the requested time span
    [request_start, request_end] into segments of chunk_length seconds per
    channel, and groups the resulting bulk request tuples
    (net, sta, loc, chan, start, end) into chunk_count-sized chunks per data
    center.

    Returns an ObjDict keyed by '<DATACENTER>_<chunk_index>' whose values hold
    the data center url, its dataselect service address and the 'bulk' request
    list for that chunk.
    """
    # This dictionary stores all the fedcatalog information.
    fedcatalog_info = dict()
    # This dictionary provides a template for fedcatalog creation.
    catalog_info = dict()
    bulk_list = collections.OrderedDict()
    dc_chunk_list = dict()
    msg_lib.info(f'sending request to fedcatalog: {req_url}')
    try:
        content = utils_lib.read_url(req_url)
    except Exception as _er:
        code = msg_lib.error(f'Request {req_url}: {_er}', 4)
        sys.exit(code)
    # Go through the station list and see if they qualify.
    _lines = content.split('\n')
    _line_index = -1
    previous_dc = None
    dc_name = None
    for _line in _lines:
        _line_index += 1
        # Skip the blank and the comment lines.
        if not _line.strip() or _line.startswith('#'):
            continue
        # From the parameter=value lines, we are interested in the
        # DATACENTER and DATASELECTSERVICE lines.
        elif '=' in _line:
            # Split on the FIRST '=' only — service URLs may themselves
            # contain '=' characters.
            _par, _value = _line.split('=', 1)
            # Found the data center name.
            if _par == 'DATACENTER':
                if dc_name is not None:
                    previous_dc = dc_name
                msg_lib.info(f'from the {_value} data center')
                dc_name, dc_url = _value.strip().split(',')
                # Initialize the data center information, create chunk_count
                # containers for chunked requests.
                if dc_name not in catalog_info:
                    msg_lib.info(
                        f'Initiating fedcatalog request for {dc_name}')
                    catalog_info[dc_name] = utils_lib.ObjDict({
                        'url': dc_url,
                        'dataselect_service': '',
                        'bulk': list()
                    })
                # If this is not the first data center, save the previous
                # data center's bulk list.
                if bulk_list:
                    this_dc_list = list()
                    for _key in bulk_list:
                        # Fixed: append the full (net, sta, loc, chan, start,
                        # end) request tuple. Previously only element [0]
                        # (the network code) was saved here, truncating the
                        # chunked bulk requests, while the end-of-loop save
                        # below stores the full tuple.
                        this_dc_list.append(bulk_list[_key])
                    # Break the list into chunks and add it to fedcatalog_info.
                    # We incorporate chunk_index so a later band does not
                    # overwrite this band's chunks.
                    for chunk_index, chunk in enumerate(
                            divide_to_chunks(this_dc_list, chunk_count)):
                        chunk_dc = f'{previous_dc}_{chunk_index}'
                        # Keep track of chunks for each DC for later use.
                        if previous_dc not in dc_chunk_list:
                            dc_chunk_list[previous_dc] = list()
                        dc_chunk_list[previous_dc].append(chunk_dc)
                        fedcatalog_info[chunk_dc] = catalog_info[
                            previous_dc].copy()
                        fedcatalog_info[chunk_dc]['bulk'] = chunk
                    # The list is saved. Now, reset the bulk_list.
                    bulk_list = collections.OrderedDict()
                continue
            # Found the dataselect service address.
            elif _par == 'DATASELECTSERVICE':
                # Save the dataselect service address for all chunks.
                if dc_name in dc_chunk_list:
                    for chunk_dc in dc_chunk_list[dc_name]:
                        fedcatalog_info[chunk_dc][
                            'dataselect_service'] = _value.strip()
                # Save the dataselect service address in the catalog for this DC.
                catalog_info[dc_name]['dataselect_service'] = _value.strip()
                msg_lib.info(f'dataselect service is {_value.strip()}')
                continue
            else:
                # Ignore the other definitions.
                continue
        # The rest are the station lines.
        # Skip the blank lines.
        if not (_line.strip()):
            continue
        # Get the station information and split the requested window into
        # chunk_length-second segments.
        net, sta, loc, chan, sta_start, sta_end = get_request_items(_line)
        start = UTCDateTime(request_start)
        end = UTCDateTime(request_end)
        segment = -1
        while start < end:
            segment += 1
            req_start = start.strftime('%Y-%m-%dT%H:%M:%S')
            if start + chunk_length <= end:
                req_end = (start + chunk_length).strftime('%Y-%m-%dT%H:%M:%S')
            else:
                req_end = end.strftime('%Y-%m-%dT%H:%M:%S')
            _net_sta_key = f'{net}_{sta}_{chan}_{segment}'
            bulk_list[_net_sta_key] = (net, sta, loc, chan, req_start, req_end)
            # Nudge past the previous end to avoid overlapping samples.
            start += chunk_length + 0.0001
    # Save the last data center's bulk list.
    if bulk_list:
        this_dc_list = list()
        for _key in bulk_list:
            this_dc_list.append(bulk_list[_key])
        # Break the list into chunks and add it to fedcatalog_info.
        for chunk_index, chunk in enumerate(
                divide_to_chunks(this_dc_list, chunk_count)):
            chunk_dc = f'{dc_name}_{chunk_index}'
            # Keep track of chunks for each DC for later use.
            if dc_name not in dc_chunk_list:
                dc_chunk_list[dc_name] = list()
            dc_chunk_list[dc_name].append(chunk_dc)
            fedcatalog_info[chunk_dc] = catalog_info[dc_name].copy()
            fedcatalog_info[chunk_dc]['bulk'] = chunk
        # Reset the bulk_list.
        bulk_list = collections.OrderedDict()
    return utils_lib.ObjDict(fedcatalog_info)
    # Tail of a function whose definition begins above this view.
    return OK

################################################################################################
#
# Main
#
################################################################################################
#
# set parameters
#
# Parse command-line arguments and load the matching parameter module
# (<script>_param) from paramPath.
args = getArgs(sys.argv)
paramFileName = script.replace('.py', '') + '_param'
print("\n\n\n")
msgLib.info(', '.join([script, paramFileName, version]))
msgLib.info('Param Path: ' + paramPath)
try:
    param = importlib.import_module(paramFileName)
    msgLib.info("loaded: " + paramFileName)
except:
    # NOTE(review): bare except hides the underlying import error — consider
    # `except Exception as e` and including e in the message.
    msgLib.error("failed to load: " + paramFileName, 1)
    sys.exit()

# Required/optional run arguments; 'net' has no default and must be given.
channelList = getParam(args, 'chan', msgLib, param.chan)
network = getParam(args, 'net', msgLib)
if network is None:
    msgLib.error('network not defined!', 1)
    sys.exit()
station = getParam(args, 'sta', msgLib)
        # Tail of a get_param-style helper whose `def` line is above this view:
        # log and return the found argument value.
        msgLib.info('{}: {}'.format(this_key, these_args[this_key]))
        return these_args[this_key]
    elif value is not None:
        return value
    else:
        msgLib.error('Missing parameter {}'.format(this_key), 1)
        usage()
        sys.exit()


# Get user-provided arguments and script libraries.
args = get_args(sys.argv)
param_file_name = '{}_param'.format(script.replace('.py', ''))
print('\n\n\n')
msgLib.info(', '.join([script, param_file_name, version]))
try:
    param = importlib.import_module(param_file_name)
except Exception as e:
    msgLib.error('failed to load: {}\n{}'.format(param_file_name, e), 1)
    sys.exit()

# Run flags: command line overrides the parameter file defaults.
verbose = int(get_param(args, 'verbose', value=param.verbose))
do_plot = int(get_param(args, 'plot', value=param.plot))
plot_nnm = int(get_param(args, 'plotnnm', value=param.plotnnm))
if verbose:
    msgLib.info('Param Path: {}'.format(paramPath))
    msgLib.info('loaded: {}'.format(param_file_name))

# If plot is not requested, turn the display requirement.
# Continuation of an `if` (param-file check) whose start is above this view.
else:
    usage()
    msg_lib.error(f'bad parameter file name {param_file}', 2)
    sys.exit()

# Run configuration from command line / parameter file.
channel_directory = utils_lib.get_param(
    args, 'chandir', utils_lib.param(param, 'chanDir').chanDir, usage)
verbose = utils_lib.get_param(args, 'verbose', False, usage)
verbose = utils_lib.is_true(verbose)
delimiter = param.separator
if verbose:
    msg_lib.info(f'script: {script} {len(sys.argv) - 1} args: {sys.argv}')
if len(sys.argv) < 9:
    code = msg_lib.error('not enough argument(s)', 1)
    usage()
    sys.exit(code)

# The run arguments.
network = utils_lib.get_param(args, 'net', None, usage)
station = utils_lib.get_param(args, 'sta', None, usage)
location = sta_lib.get_location(utils_lib.get_param(args, 'loc', None, usage))

"""polarization files are all HOURLY files with 50% overlap
computed as part of the polarization product
date parameter of the hourly PSDs to start, it starts at hour 00:00:00
 - HOURLY files YYYY-MM-DD"""
variables = param.variables
data_directory = param.dataDirectory
param_file = utils_lib.get_param(args, 'param', default_param_file, usage) # Check and see if the param file exists. if os.path.isfile(os.path.join(param_path, param_file + ".py")): sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'param')) param = importlib.import_module(param_file) else: msg_lib.error(f'bad parameter file name {param_file}', 2) usage() sys.exit() verbose = utils_lib.get_param(args, 'verbose', False, usage) if verbose: msg_lib.info(f'script: {script} {len(sys.argv) - 1} args: {sys.argv}') if len(sys.argv) < 9: code = msg_lib.error('not enough argument(s)', 1) usage() sys.exit(code) # The run arguments. network = utils_lib.get_param(args, 'net', None, usage) station = utils_lib.get_param(args, 'sta', None, usage) location = sta_lib.get_location(utils_lib.get_param(args, 'loc', None, usage)) # Set the bin ranges. bin_start = list() bin_end = list() bins = param.bins
# Bin ranges. bin_start = list() bin_end = list() bins = param.bins for _bin in bins: try: bin_start.append(param.binStart[_bin]) bin_end.append(param.binEnd[_bin]) except Exception as ex: code = msg_lib.error(f'bad band {_bin} in param file', 2) sys.exit(code) if verbose > 0: msg_lib.info(f'PERIOD BIN START: {bin_start}') msg_lib.info(f'PERIOD BIN ENDis: {bin_end}') network = utils_lib.get_param(args, 'net', None, usage) station = utils_lib.get_param(args, 'sta', None, usage) location = sta_lib.get_location(utils_lib.get_param(args, 'loc', None, usage)) channel = utils_lib.get_param(args, 'chan', None, usage) xtype = utils_lib.get_param(args, 'xtype', None, usage) # NOTE: the input PSD file is assumed to have the same format as the output of the ntk_extractPsdHour.py script psd_file = utils_lib.get_param(args, 'file', None, usage) psd_directory = os.path.join(param.dataDirectory, param.psdDirectory) psd_file_name = os.path.join(psd_directory, ".".join([network, station, location]), channel, psd_file) # Check to see if the PSD file exists. if not os.path.isfile(psd_file_name):
param_file = utils_lib.get_param(args, 'param', default_param_file, usage) # Check and see if the param file exists. if os.path.isfile(os.path.join(param_path, param_file + ".py")): sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'param')) param = importlib.import_module(param_file) else: msg_lib.error(f'bad parameter file name {param_file}', 2) usage() sys.exit() verbose = utils_lib.get_param(args, 'verbose', False, usage) if verbose: msg_lib.info(f'script: {script} {len(sys.argv) - 1} args: {sys.argv}') if len(sys.argv) < 9: code = msg_lib.error('not enough argument(s)', 1) usage() sys.exit(code) # The run arguments. network = utils_lib.get_param(args, 'net', None, usage) station = utils_lib.get_param(args, 'sta', None, usage) location = sta_lib.get_location(utils_lib.get_param(args, 'loc', None, usage)) # Set parameters for the time axis labeling. # Every year. years = YearLocator() # Every month.
# Tail of a date-span computation whose start is above this view (presumably
# `delta = date(end...) - date(start...)` — confirm against the full script).
date(int(start_year), int(start_month), int(start_day))

# Build the list of data days (YYYY/DOY) covering the requested span.
data_days_list = list()
for i in range(delta.days + 1):
    this_day = start_datetime + td(days=i)
    data_days_list.append(this_day.strftime("%Y/%j"))

"""
Find and start reading the PSD files.
Build the file tag for the PSD files to read, example:
    NM_SLM_--_BH_2009-01-06
"""
psd_db_dir_tag, psd_db_file_tag = file_lib.get_dir(param.dataDirectory,
                                                   param.psdDbDirectory,
                                                   network, station, location,
                                                   channel)
msg_lib.info(f'PSD DIR TAG: {psd_db_dir_tag}')
if verbose > 0:
    msg_lib.info(f'PSD FILE TAG: {psd_db_file_tag}')

# Open the output file.
psd_dir_tag, psd_file_tag = file_lib.get_dir(param.dataDirectory,
                                             param.psdDirectory, network,
                                             station, location, channel)
file_lib.make_path(psd_dir_tag)
tag_list = [
    psd_file_tag,
    start_date_time.split('.')[0],
    end_date_time.split('.')[0], xtype
]
output_file_name = file_lib.get_file_name(param.namingConvention, psd_dir_tag,
                                          tag_list)
def get_channel_waveform_files(network, station, location, channel,
                               start_time, end_time, client, file_tag,
                               resp_dir=None, inventory=None):
    """
    get_channel_waveform_files gets data from files and the response from the
    FDSN client for the requested network/station/location/channel and time.

    The output is a (inventory, stream) tuple, or (None, None) on failure.

    file_tag should be in such a way to guide the function in selecting
    waveform files, like {this Path}/*.SAC

    channel may have the last one or two letters wildcarded (e.g.
    channel="EH*") to select all components with a common band/instrument
    code. All other selection criteria that accept strings (network, station,
    location) may also contain Unix style wildcards (*, ?, ...).

    HISTORY:
       2015-03-17 Manoch: added the "waterLevel" parameter to provide user
                  with more control on how the ObsPy module shrinks values
                  under water-level of max spec amplitude when removing the
                  instrument response.
       2015-02-24 Manoch: introduced two new parameters
                  (performInstrumentCorrection, applyScale) to allow user
                  avoid instrument correction also now user can turn od
                  decon. filter
       2014-03-15 Manoch: created
    """
    debug = True
    sender = 'get_channel_waveform_files'

    # Stream holds the final stream.
    this_start_time = UTCDateTime(start_time)
    this_end_time = UTCDateTime(end_time)
    stream = Stream()
    try:
        # Read in the files to a stream.
        msg_lib.info(f'checking: {file_tag}')
        msg_lib.info('Apply scaling')
        stream_in = read(file_tag,
                         start_time=this_start_time,
                         end_time=this_end_time,
                         nearest_sample=True)
    except Exception as ex:
        msg_lib.error(
            f'{network}, {station}, {location}, {channel}, {start_time}, {end_time} {ex}',
            2)
        # Fixed: was `return None` — inconsistent with the (inventory, stream)
        # tuple contract of every other return path; callers that unpack the
        # result would crash.
        return None, None

    try:
        # Select the desired streams only.
        if location == "--":
            stream_out = stream_in.select(network=network,
                                          station=station,
                                          location="",
                                          channel=channel)
        else:
            stream_out = stream_in.select(network=network,
                                          station=station,
                                          location=location,
                                          channel=channel)
        for i in range(len(stream_out)):
            # Get the network, station, location and channel information.
            this_nslc, this_time, junk = str(stream_out[i]).split('|')
            net, sta, loc, chan = this_nslc.strip().split('.')
            if len(loc) == 0:
                loc = "--"

            # If resp_dir is defined, first look into user's resp_dir for
            # stationXML files, if not found get it from FDSN.
            start, end = this_time.split(' - ')
            inv = None
            if resp_dir is not None:
                msg_lib.info(f'Getting response from {resp_dir}')
                this_loc = loc
                if loc == '--':
                    this_loc = ''
                inventory, inv = get_response_from_file(
                    inventory, resp_dir, net, sta, this_loc, chan,
                    start_time, debug)
                if inv is not None:
                    if debug:
                        msg_lib.info(f'Attaching {inv}')
                    stream_out[i].attach_response(inv)
                    stream += stream_out[i]
                else:
                    this_start_time = UTCDateTime(start.strip())
                    msg_lib.warning(
                        sender,
                        f'NO RESPONSE FILE: {net}, {sta}, {loc}, {chan}, {this_start_time}'
                    )
            if inv is None and client is not None:
                # The FDSN webservices return StationXML metadata.
                msg_lib.info('Getting response from IRIS')
                try:
                    this_start_time = UTCDateTime(start.strip())
                    this_end_time = UTCDateTime(end.strip())
                    inv = client.get_stations(network=net,
                                              station=sta,
                                              location=loc,
                                              channel=chan,
                                              starttime=this_start_time,
                                              endtime=this_end_time,
                                              level="response")
                    stream_out[i].attach_response(inv)
                    stream += stream_out[i]
                    if debug:
                        msg_lib.info(f'Response attached: {inv}')
                except Exception as ex:
                    this_start_time = UTCDateTime(start.strip())
                    msg_lib.warning(
                        sender, f'NO RESPONSE: {net}, {sta}, {loc}, {chan}, '
                        f'{this_start_time}, {this_end_time} {ex}')
                    continue
    except Exception as ex:
        # Fixed: removed `print(str(e))` — `e` was undefined here and raised a
        # NameError inside the handler, masking the real error.
        msg_lib.error(
            f'get_channel_waveform_files {network}, {station}, {location}, {channel}, {start_time}, '
            f'{end_time}, {ex}', 2)
        return None, None
    return inventory, stream
# Set the run mode. do_plot = utils_lib.get_param(args, 'plot', utils_lib.param(param, 'plot').plot, usage) do_plot = utils_lib.is_true(do_plot) plot_nm = utils_lib.get_param(args, 'plotnm', utils_lib.param(param, 'plotNm').plotNm, usage) do_plot_nnm = utils_lib.is_true(plot_nm) verbose = utils_lib.get_param(args, 'verbose', utils_lib.param(param, 'verbose').verbose, usage) verbose = utils_lib.is_true(verbose) timing = utils_lib.get_param(args, 'timing', utils_lib.param(param, 'timing').timing, usage) timing = utils_lib.is_true(timing) if verbose: msg_lib.info(f'script: {script} {len(sys.argv) - 1} args: {sys.argv}') octaveWindowWidth = float(1.0 / 2.0) octaveWindowShift = float(1.0 / 8.0) # Smoothing window shift : float(1.0/8.0)= 1/8 octave shift; # float(1.0/8.0) 1/8 octave shift, etc. """ Smoothing window width in octave. For test against PQLX use 1 octave width. Smoothing window width : float(1.0/1.0)= 1 octave smoothing; float(1.0/4.0) 1/4 octave smoothing, etc.""" octave_window_width = float(utils_lib.get_param(args, 'sw_width', utils_lib.param(param, 'octaveWindowWidth').octaveWindowWidth, usage)) # Smoothing window shift : float(1.0/4.0)= 1/4 octave shift; float(1.0/8.0) 1/8 octave shift, etc. octave_window_shift = float(utils_lib.get_param(args, 'sw_shift', utils_lib.param(param, 'octaveWindowShift').octaveWindowShift, usage))
# Abort when the requested time window produced no data days.
if len(data_day_list) <= 0:
    usage()
    msg_lib.error(f'Bad start/end times [{start_date_time}, {end_date_time}]',
                  2)
    sys.exit()

# Find PSD files and start reading them.
# build the file tag for the PSD files to read, example:
#     NM_SLM_--_BH_2009-01-06
psd_db_dir_tag, psd_db_file_tag = file_lib.get_dir(param.dataDirectory,
                                                   param.psdDbDirectory,
                                                   network, station, location,
                                                   channel)
msg_lib.info(f'PSD DIR TAG: {psd_db_dir_tag}')

# Loop through the windows (one PSD-db day directory per iteration).
for n in range(len(data_day_list)):
    msg_lib.info(f'day {data_day_list[n]}')
    d_file = dict()
    h_file = list()
    this_file = os.path.join(psd_db_dir_tag, data_day_list[n],
                             f'{psd_db_file_tag}*{xtype}.txt')
    if verbose:
        msg_lib.info(f'Looking into: {this_file}')
    this_file_list = sorted(glob.glob(this_file))
    if len(this_file_list) <= 0:
        msg_lib.warning('Main', 'No files found!')
        # Body of this `if verbose:` continues past this view.
        if verbose:
# Run parameters. xtype = utils_lib.get_param(args, 'xtype', utils_lib.param(param, 'xType').xType[0], usage) if xtype not in param.xType: usage() code = msg_lib.error(f'{script}, Invalid xtype [{xtype}]', 2) sys.exit(code) # Set the run mode. verbose = utils_lib.get_param(args, 'verbose', utils_lib.param(param, 'verbose').verbose, usage) verbose = utils_lib.is_true(verbose) if verbose: msg_lib.info( f'{script}, script: {script} {len(sys.argv) - 1} args: {sys.argv}') # We always want to start from the beginning of the day, so we discard user hours, if any start_date_time = utils_lib.get_param(args, 'start', None, usage) try: start_datetime, start_year, start_month, start_day, start_doy = utils_lib.time_info( start_date_time) except Exception as ex: usage() code = msg_lib.error(f'Invalid start ({start_date_time})\n{ex}', 2) sys.exit(code) # We always want to start from the beginning of the day, so we discard user hours, if any. end_date_time = utils_lib.get_param(args, 'end', None, usage) # end_date_time is included.