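# The functions below are excerpted from a larger processing module, so a
# minimal set of imports is sketched here. The gmprocess-internal paths are
# assumptions based on the gmprocess v1.x package layout and may differ in
# other versions.
import logging

import numpy as np
from obspy.taup import TauPyModel

from gmprocess.config import get_config
from gmprocess.streamcollection import StreamCollection
from gmprocess.windows import signal_split, signal_end, window_checks
from gmprocess.snr import compute_snr
from gmprocess.corner_frequencies import get_corner_frequencies
from gmprocess.io.read import read_data
from gmprocess.io.test_utils import read_data_dir

# REQ_ORIGIN (the steps that need the origin passed in), PROCESS_LEVELS,
# build_report, and create_travel_time_dataframe are also referenced below;
# their home modules vary across gmprocess versions, so they are assumed to
# be provided by the surrounding package.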
def process_streams(streams, origin, config=None):
    """
    Run processing steps from the config file.

    This method looks in the 'processing' config section and loops over those
    steps and hands off the config options to the appropriate processing
    method. Streams that fail any of the tests are kept in the
    StreamCollection but the parameter 'passed_checks' is set to False and
    subsequent processing steps are not applied once a check has failed.

    Args:
        streams (StreamCollection):
            A StreamCollection object.
        origin (ScalarEvent):
            ScalarEvent object.
        config (dict):
            Configuration dictionary (or None). See get_config().

    Returns:
        A StreamCollection object.
    """
    if not isinstance(streams, StreamCollection):
        raise ValueError('streams must be a StreamCollection instance.')

    if config is None:
        config = get_config()

    logging.info('Processing streams...')
    event_time = origin.time
    event_lon = origin.longitude
    event_lat = origin.latitude

    # -------------------------------------------------------------------------
    # Compute a travel-time matrix for interpolation later in the
    # trim_multiple_events step
    if any('trim_multiple_events' in step for step in config['processing']):
        travel_time_df, catalog = create_travel_time_dataframe(
            streams, **config['travel_time'])

    # -------------------------------------------------------------------------
    # Begin noise/signal window steps
    logging.info('Windowing noise and signal...')
    window_conf = config['windows']
    model = TauPyModel(config['pickers']['travel_time']['model'])

    for st in streams:
        logging.info('Checking stream %s...' % st.get_id())
        # Estimate noise/signal split time
        st = signal_split(st, origin, model, picker_config=config['pickers'],
                          config=config)

        # Estimate end of signal
        end_conf = window_conf['signal_end']
        event_mag = origin.magnitude
        st = signal_end(st,
                        event_time=event_time,
                        event_lon=event_lon,
                        event_lat=event_lat,
                        event_mag=event_mag,
                        **end_conf)

        wcheck_conf = window_conf['window_checks']
        if wcheck_conf['do_check']:
            st = window_checks(
                st,
                min_noise_duration=wcheck_conf['min_noise_duration'],
                min_signal_duration=wcheck_conf['min_signal_duration'])

    # -------------------------------------------------------------------------
    # Begin processing steps
    logging.info('Starting processing...')
    processing_steps = config['processing']

    # Loop over streams
    for stream in streams:
        logging.info('Stream: %s' % stream.get_id())
        for processing_step_dict in processing_steps:

            key_list = list(processing_step_dict.keys())
            if len(key_list) != 1:
                raise ValueError(
                    'Each processing step must contain exactly one key.')
            step_name = key_list[0]

            logging.info('Processing step: %s' % step_name)
            step_args = processing_step_dict[step_name]
            # Using globals doesn't seem like a great solution here, but it
            # works.
            if step_name not in globals():
                raise ValueError(
                    'Processing step %s is not valid.' % step_name)

            # Origin is required by some steps and has to be handled specially.
            # There must be a better solution for this...
            if step_name == 'fit_spectra':
                step_args = {'origin': origin}
            elif step_name in REQ_ORIGIN:
                step_args['origin'] = origin
            elif step_name == 'trim_multiple_events':
                step_args['catalog'] = catalog
                step_args['travel_time_df'] = travel_time_df
            elif step_name == 'compute_snr':
                step_args['mag'] = origin.magnitude

            if step_args is None:
                stream = globals()[step_name](stream)
            else:
                stream = globals()[step_name](stream, **step_args)

    # -------------------------------------------------------------------------
    # Begin colocated instrument selection
    colocated_conf = config['colocated']
    streams.select_colocated(**colocated_conf)

    for st in streams:
        for tr in st:
            tr.stats.standard.process_level = PROCESS_LEVELS['V2']

    logging.info('Finished processing streams.')
    return streams
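# A minimal driver sketch for process_streams(). This is illustrative, not
# part of the original module: it reuses the data-loading helpers exercised
# by test_corner_frequencies() below (read_data_dir, read_data,
# StreamCollection, get_config), and the 'geonet'/'us1000778i' event mirrors
# the test data.
def _example_process_streams():
    data_files, origin = read_data_dir('geonet', 'us1000778i', '*.V1A')
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)
    # Run the full config-driven pipeline and return the processed streams.
    return process_streams(sc, origin, config=get_config())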
def process_streams(streams, origin, config=None):
    """
    Run processing steps from the config file.

    This method looks in the 'processing' config section and loops over those
    steps and hands off the config options to the appropriate processing
    method. Streams that fail any of the tests are kept in the
    StreamCollection but the parameter 'passed_checks' is set to False and
    subsequent processing steps are not applied once a check has failed.

    Args:
        streams (StreamCollection):
            A StreamCollection object.
        origin (ScalarEvent):
            ScalarEvent object.
        config (dict):
            Configuration dictionary (or None). See get_config().

    Returns:
        A StreamCollection object.
    """
    if not isinstance(streams, StreamCollection):
        raise ValueError('streams must be a StreamCollection instance.')

    if config is None:
        config = get_config()

    logging.info('Processing streams...')
    event_time = origin.time
    event_lon = origin.longitude
    event_lat = origin.latitude

    # -------------------------------------------------------------------------
    # Begin noise/signal window steps
    logging.info('Windowing noise and signal...')
    window_conf = config['windows']

    processed_streams = streams.copy()
    for st in processed_streams:
        logging.info('Checking stream %s...' % st.get_id())
        # Estimate noise/signal split time
        st = signal_split(st, origin)

        # Estimate end of signal
        end_conf = window_conf['signal_end']
        event_mag = origin.magnitude
        st = signal_end(st,
                        event_time=event_time,
                        event_lon=event_lon,
                        event_lat=event_lat,
                        event_mag=event_mag,
                        **end_conf)

        wcheck_conf = window_conf['window_checks']
        if wcheck_conf['do_check']:
            st = window_checks(
                st,
                min_noise_duration=wcheck_conf['min_noise_duration'],
                min_signal_duration=wcheck_conf['min_signal_duration'])

    # -------------------------------------------------------------------------
    # Begin processing steps
    logging.info('Starting processing...')
    processing_steps = config['processing']

    # Loop over streams
    for stream in processed_streams:
        logging.info('Stream: %s' % stream.get_id())
        for processing_step_dict in processing_steps:

            key_list = list(processing_step_dict.keys())
            if len(key_list) != 1:
                raise ValueError(
                    'Each processing step must contain exactly one key.')
            step_name = key_list[0]

            logging.info('Processing step: %s' % step_name)
            step_args = processing_step_dict[step_name]
            # Using globals doesn't seem like a great solution here, but it
            # works.
            if step_name not in globals():
                raise ValueError(
                    'Processing step %s is not valid.' % step_name)

            # Origin is required by some steps and has to be handled specially.
            # There must be a better solution for this...
            if step_name == 'fit_spectra':
                step_args = {'origin': origin}
            elif step_name in REQ_ORIGIN:
                step_args['origin'] = origin

            if step_args is None:
                stream = globals()[step_name](stream)
            else:
                stream = globals()[step_name](stream, **step_args)

    # Build the summary report?
    build_conf = config['build_report']
    if build_conf['run']:
        build_report(processed_streams, build_conf['directory'], origin,
                     config=config)

    logging.info('Finished processing streams.')
    return processed_streams
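# Sketch of the shape process_streams() expects for config['processing']: a
# list of single-key dicts, one per step, whose value is the kwargs dict
# handed to the step function (or None for a step with no arguments). The
# step names below appear elsewhere in this module; the option values are
# hypothetical, not the full default configuration.
_EXAMPLE_PROCESSING_STEPS = [
    {'compute_snr': {}},  # options elided; passed through as **step_args
    {'get_corner_frequencies': {
        'method': 'snr',              # default config uses 'constant'
        'snr': {'same_horiz': True},  # hypothetical sub-options
    }},
]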
def test_corner_frequencies():
    # Default config has 'constant' corner frequency method, so the need
    # here is to force the 'snr' method.
    data_files, origin = read_data_dir('geonet', 'us1000778i', '*.V1A')
    streams = []
    for f in data_files:
        streams += read_data(f)

    sc = StreamCollection(streams)

    config = get_config()

    window_conf = config['windows']

    processed_streams = sc.copy()
    for st in processed_streams:
        if st.passed:
            # Estimate noise/signal split time
            event_time = origin.time
            event_lon = origin.longitude
            event_lat = origin.latitude
            st = signal_split(st, origin)

            # Estimate end of signal
            end_conf = window_conf['signal_end']
            event_mag = origin.magnitude
            print(st)
            st = signal_end(st,
                            event_time=event_time,
                            event_lon=event_lon,
                            event_lat=event_lat,
                            event_mag=event_mag,
                            **end_conf)

            wcheck_conf = window_conf['window_checks']
            st = window_checks(
                st,
                min_noise_duration=wcheck_conf['min_noise_duration'],
                min_signal_duration=wcheck_conf['min_signal_duration'])

    pconfig = config['processing']

    # Run SNR check
    # I think we don't do this anymore.
    test = [d for d in pconfig if list(d.keys())[0] == 'compute_snr']
    snr_config = test[0]['compute_snr']
    for stream in processed_streams:
        stream = compute_snr(stream, **snr_config)

    # Run get_corner_frequencies
    test = [
        d for d in pconfig if list(d.keys())[0] == 'get_corner_frequencies'
    ]
    cf_config = test[0]['get_corner_frequencies']
    snr_config = cf_config['snr']

    # With same_horiz False
    snr_config['same_horiz'] = False

    lp = []
    hp = []
    for stream in processed_streams:
        if not stream.passed:
            continue
        stream = get_corner_frequencies(stream, method="snr", snr=snr_config)
        if stream[0].hasParameter('corner_frequencies'):
            cfdict = stream[0].getParameter('corner_frequencies')
            lp.append(cfdict['lowpass'])
            hp.append(cfdict['highpass'])
    np.testing.assert_allclose(np.sort(hp),
                               [0.00751431, 0.01354455, 0.04250735],
                               atol=1e-6)

    st = processed_streams.select(station='HSES')[0]
    lps = [tr.getParameter('corner_frequencies')['lowpass'] for tr in st]
    hps = [tr.getParameter('corner_frequencies')['highpass'] for tr in st]
    np.testing.assert_allclose(np.sort(lps), [100., 100., 100.], atol=1e-6)
    np.testing.assert_allclose(np.sort(hps),
                               [0.00305176, 0.00751431, 0.02527502],
                               atol=1e-6)

    # With same_horiz True
    snr_config['same_horiz'] = True

    lp = []
    hp = []
    for stream in processed_streams:
        if not stream.passed:
            continue
        stream = get_corner_frequencies(stream, method="snr", snr=snr_config)
        if stream[0].hasParameter('corner_frequencies'):
            cfdict = stream[0].getParameter('corner_frequencies')
            lp.append(cfdict['lowpass'])
            hp.append(cfdict['highpass'])

    np.testing.assert_allclose(np.sort(hp),
                               [0.00751431, 0.01354455, 0.04882812],
                               atol=1e-6)

    st = processed_streams.select(station='HSES')[0]
    lps = [tr.getParameter('corner_frequencies')['lowpass'] for tr in st]
    hps = [tr.getParameter('corner_frequencies')['highpass'] for tr in st]
    np.testing.assert_allclose(np.sort(lps), [100., 100., 100.], atol=1e-6)
    np.testing.assert_allclose(np.sort(hps),
                               [0.00751431, 0.00751431, 0.02527502],
                               atol=1e-6)
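# Allow the test to be run directly as a script:
if __name__ == '__main__':
    test_corner_frequencies()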