def batch_rf(init, events, inventory, path, format='H5', request_window=None,
             phase='P', dist_range=None, **rf_kwargs):
    get_waveform = init()
    root = phase + 'rf'
    _check_path(join(path, root))
    method = phase[-1]
    if dist_range is None:
        dist_range = (30, 90) if method == 'P' else (60, 85)
    if request_window is None:
        request_window = (-50, 150) if method == 'P' else (-80, 50)
    for kwargs, event, coords in _iter(events, inventory, rf=True):
        stats = rfstats(station=coords, event=event, phase=phase,
                        dist_range=dist_range)
        if not stats:
            continue
        kwargs.update({'starttime': stats.onset + request_window[0],
                       'endtime': stats.onset + request_window[1]})
        stream = get_waveform(**kwargs)
        if stream is None:
            continue
        stream = RFStream(stream, warn=False)
        stream.merge()
        if len(stream) != 3:
            import warnings
            warnings.warn('Need 3 component seismograms. More or less '
                          'than three components for event %s, station %s.'
                          % (stats.event_id, kwargs['seed_id']))
            continue
        for tr in stream:
            tr.stats.update(stats)
        stream.rf(method=method, **rf_kwargs)
        if len(stream) != 3:
            continue
        _write(stream, path, root, format)
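# Hypothetical usage sketch for batch_rf, illustrating the ``init`` factory
# argument: a callable that performs client setup once and returns the actual
# ``get_waveform`` function. The adapter assumes that the keyword arguments
# passed by batch_rf (the _iter() output plus starttime/endtime) contain a
# 'seed_id' key; the FDSN server 'IRIS' is a placeholder.
def _example_init():
    from obspy.clients.fdsn import Client
    client = Client('IRIS')

    def get_waveform(seed_id, starttime, endtime, **kwargs):
        # split the SEED id into its four parts and fetch raw waveforms
        net, sta, loc, cha = seed_id.split('.')
        try:
            return client.get_waveforms(net, sta, loc, cha,
                                        starttime, endtime)
        except Exception:
            return None  # batch_rf skips station/event pairs without data

    return get_waveform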
def rf_dmt(data_path, rf, events=None, phase='P', dist=None, **rf_kwargs):
    """TODO: doc rf_dmt"""
    events = readEvents(events)
    print(events)
    for event in events:
        event_id = event.resource_id.getQuakeMLURI().split('/')[-1]
        inputs = data_path.format(eventid=event_id)
        inputs = glob.glob(inputs)
        while len(inputs) > 0:
            files_tmp = inputs[0][:-1] + '?'
            for f in glob.glob(files_tmp):
                inputs.remove(f)
            st = RFStream(read(files_tmp, headonly=True))
            st.read_sac_header()
            stats = rfstats(stats=st[0].stats, event=event, phase=phase,
                            dist_range=dist)
            if not stats:
                continue
            st = RFStream(read(files_tmp))
            st.merge()
            if len(st) != 3:
                import warnings
                warnings.warn('Need 3 component seismograms. '
                              'Error for files %s' % files_tmp)
                continue
            for tr in st:
                tr.stats.update(stats)
            st.rf(method=phase[0], **rf_kwargs)
            for tr in st:
                output = rf.format(eventid=event_id, stats=tr.stats)
                _create_dir(output)
                tr.write(output, 'SAC')
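# Hypothetical usage sketch for rf_dmt. Both ``data_path`` and ``rf`` are
# format strings: rf_dmt substitutes {eventid} (and, in the output template,
# attributes of tr.stats) before globbing and writing. The directory layout
# below is only an illustrative assumption, not a documented obspyDMT layout:
#
#     rf_dmt(data_path='dmt_data/{eventid}/*.BH*',
#            rf='output/{eventid}/RF_{stats.station}_{stats.channel}.SAC',
#            events='events.xml', phase='P')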
def rf_client(get_waveform, rf, stations=None, events=None,
              request_window=(-50, 150), phase='P', dist=None,
              **rf_kwargs):  # S: -300 to 300
    """TODO: doc rf_client"""
    events = readEvents(events)
    stations = _read_stations(stations)
    for event in events:
        event_id = event.resource_id.getQuakeMLURI().split('/')[-1]
        for station in stations:
            stats = rfstats(station=stations[station], event=event,
                            phase=phase, dist_range=dist)
            if not stats:
                continue
            st = get_waveform(station, stats.onset + request_window[0],
                              stats.onset + request_window[1])
            st = RFStream(stream=st)
            st.merge()
            if len(st) != 3:
                import warnings
                warnings.warn('Need 3 component seismograms. More or less '
                              'than three components for event %s, station %s.'
                              % (event_id, station))
                continue
            for tr in st:
                tr.stats.update(stats)
            st.rf(method=phase[0], **rf_kwargs)
            st.write_sac_header()
            for tr in st:
                output = rf.format(eventid=event_id, stats=tr.stats)
                _create_dir(output)
                tr.write(output, 'SAC')
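# Hypothetical usage sketch for rf_client. It expects a callable
# ``get_waveform(station, starttime, endtime)`` returning a three-component
# obspy Stream. The adapter below wraps an FDSN client; the server 'GFZ',
# network 'GE', empty location, channel pattern 'BH?' and the file names in
# the commented call are placeholders.
def _example_get_waveform(station, starttime, endtime):
    from obspy.clients.fdsn import Client
    # a real implementation would create the client once and reuse it
    client = Client('GFZ')
    return client.get_waveforms('GE', station, '', 'BH?', starttime, endtime)

# rf_client(_example_get_waveform,
#           rf='output/{eventid}/RF_{stats.station}_{stats.channel}.SAC',
#           stations='stations.txt', events='events.xml', phase='P')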
def test_deconvolution_of_convolution(self):
    from rf.rfstream import RFStream, RFTrace
    data = np.zeros(400)
    data_src = np.zeros(400)
    hann1 = get_window('hann', 10)
    hann2 = get_window('hann', 50)
    data_src[40:50] = hann1
    data_src[50:60] = -hann1
    data[100:150] = hann2
    data[240:290] = 0.5 * hann2
    data_rsp = convolve(data_src, data, 'full')[50:450] / 3.
    stream1 = RFStream([RFTrace(data=data_src), RFTrace(data=data_rsp)])
    for i, tr in enumerate(stream1):
        tr.stats.channel = tr.stats.channel[:2] + 'LQT'[i]
        tr.stats.onset = tr.stats.starttime + 40
    stream2 = stream1.copy()
    stream1.deconvolve(spiking=10)
    stream2.deconvolve(method='freq', waterlevel=0.1)
    # import matplotlib.pyplot as plt
    # plt.subplot(121)
    # plt.plot(data, label='desired')
    # plt.plot(data_src, label='source')
    # plt.plot(data_rsp, label='convolution')
    # plt.plot(stream1[0].data, label='deconv src')
    # plt.plot(stream1[1].data, label='deconv')
    # plt.legend()
    # plt.subplot(122)
    # plt.plot(data)
    # plt.plot(data_src)
    # plt.plot(data_rsp)
    # plt.plot(stream2[0].data)
    # plt.plot(stream2[1].data)
    # plt.show()
    # shift from middle of source (50) to onset (40)
    peakpos = np.argmax(data) - 10
    self.assertEqual(peakpos, np.argmax(stream1[1].data))
    self.assertEqual(peakpos, np.argmax(stream2[1].data))
def run_rf(events, inventory, get_waveforms, path, format='H5',
           request_window=None, phase='P', dist_range=None,
           tt_model='iasp91', pp_depth=None, pp_phase=None, model='iasp91',
           **rf_kwargs):
    root = phase + 'rf'
    _check_path(join(path, root))
    method = phase[-1].upper()
    if dist_range is None:
        dist_range = (30, 90) if method == 'P' else (60, 85)
    if request_window is None:
        request_window = (-50, 150) if method == 'P' else (-100, 50)
    for kwargs, event, coords in _iter(events, inventory, rf=True):
        stats = rfstats(station=coords, event=event, phase=phase,
                        dist_range=dist_range, tt_model=tt_model,
                        pp_depth=pp_depth, pp_phase=pp_phase, model=model)
        if not stats:
            continue
        kwargs.update({'starttime': stats.onset + request_window[0],
                       'endtime': stats.onset + request_window[1]})
        stream = get_waveforms(**kwargs)
        if stream is None:
            continue
        stream = RFStream(stream, warn=False)
        stream.merge()
        if len(stream) != 3:
            import warnings
            warnings.warn('Need 3 component seismograms. More or less '
                          'than three components for event %s, station %s.'
                          % (stats.event_id, kwargs['seed_id']))
            continue
        for tr in stream:
            tr.stats.update(stats)
        stream.rf(method=method, **rf_kwargs)
        if len(stream) != 3:
            continue
        _write(stream, path, root, format)
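# Hypothetical end-to-end sketch for run_rf, reusing the _example_init adapter
# defined above. The file names and the output path are placeholders.
def _example_run_rf():
    import obspy
    events = obspy.read_events('events.xml')
    inventory = obspy.read_inventory('stations.xml')
    run_rf(events, inventory, _example_init(), path='out', phase='P')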
def iter_event_data(events, inventory, get_waveforms, phase='P',
                    request_window=None, pad=10, pbar=None, **kwargs):
    """
    Return iterator yielding three component streams per station and event.

    :param events: list of events or `~obspy.core.event.Catalog` instance
    :param inventory: `~obspy.core.inventory.inventory.Inventory` instance
        with station and channel information
    :param get_waveforms: Function returning the data. It has to take the
        arguments network, station, location, channel, starttime, endtime.
    :param phase: Considered phase, e.g. 'P', 'S', 'PP'
    :type request_window: tuple (start, end)
    :param request_window: requested time window around the onset of the phase
    :param float pad: add specified time in seconds to request window and
        trim afterwards again
    :param pbar: tqdm_ instance for displaying a progressbar
    :return: three component streams with raw data

    Example usage with progressbar::

        from tqdm import tqdm
        from rf.util import iter_event_data
        with tqdm() as t:
            for stream3c in iter_event_data(*args, pbar=t):
                do_something(stream3c)

    .. _tqdm: https://pypi.python.org/pypi/tqdm
    """
    from rf.rfstream import rfstats, RFStream
    method = phase[-1].upper()
    if request_window is None:
        request_window = (-50, 150) if method == 'P' else (-100, 50)
    stations = _get_stations(inventory)
    if pbar is not None:
        pbar.total = len(events) * len(stations)
    for event, seedid in itertools.product(events, stations):
        if pbar is not None:
            pbar.update(1)
        origin_time = (event.preferred_origin() or event.origins[0])['time']
        try:
            args = (seedid[:-1] + stations[seedid], origin_time)
            coords = inventory.get_coordinates(*args)
        except Exception:  # station not available at that time
            continue
        stats = rfstats(station=coords, event=event, phase=phase, **kwargs)
        if not stats:
            continue
        net, sta, loc, cha = seedid.split('.')
        starttime = stats.onset + request_window[0]
        endtime = stats.onset + request_window[1]
        kws = {'network': net, 'station': sta, 'location': loc,
               'channel': cha, 'starttime': starttime - pad,
               'endtime': endtime + pad}
        try:
            stream = get_waveforms(**kws)
        except Exception:  # no data available
            continue
        stream.trim(starttime, endtime)
        stream.merge()
        if len(stream) != 3:
            from warnings import warn
            warn('Need 3 component seismograms. %d components '
                 'detected for event %s, station %s.'
                 % (len(stream), event.resource_id, seedid))
            continue
        if any(isinstance(tr.data, np.ma.masked_array) for tr in stream):
            from warnings import warn
            warn('Gaps or overlaps detected for event %s, station %s.'
                 % (event.resource_id, seedid))
            continue
        for tr in stream:
            tr.stats.update(stats)
        yield RFStream(stream, warn=False)
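# Hypothetical usage sketch for iter_event_data. Because the required
# ``get_waveforms`` signature (network, station, location, channel, starttime,
# endtime) matches obspy's FDSN client method, the client method can be passed
# directly. The server 'GFZ' and the file names are placeholders.
def _example_iter_event_data():
    import obspy
    from obspy.clients.fdsn import Client
    from tqdm import tqdm
    events = obspy.read_events('events.xml')
    inventory = obspy.read_inventory('stations.xml')
    client = Client('GFZ')
    rfs = []
    with tqdm() as pbar:
        for stream3c in iter_event_data(events, inventory,
                                        client.get_waveforms, pbar=pbar):
            stream3c.rf(method='P')  # compute receiver functions
            rfs.append(stream3c)
    return rfs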
def run_commands(command, commands=(), events=None, inventory=None,
                 objects=None, get_waveforms=None, data=None, plugin=None,
                 phase=None, moveout_phase=None,
                 path_in=None, path_out=None, format='Q', newformat=None,
                 **kw):
    """Load files, apply commands and write result files."""
    for opt in kw:
        if opt not in DICT_OPTIONS:
            raise ParseError('Unknown config option: %s' % opt)
    for opt in DICT_OPTIONS:
        default = None if opt == 'boxbins' else {}
        d = kw.setdefault(opt, default)
        if isinstance(d, basestring):
            kw[opt] = json.loads(d)
    if phase is not None:
        kw['options']['phase'] = phase
    if moveout_phase is not None:
        kw['moveout']['phase'] = moveout_phase
    if kw['boxbins'] is not None:
        kw['boxes']['bins'] = np.linspace(*kw['boxbins'])
    try:
        if command == 'calc':
            assert len(commands) < 3
            if len(commands) == 2:
                assert commands[0] != commands[1]
        elif command == 'moveout':
            assert len(commands) < 2
    except:
        raise ParseError('calc or moveout command given more than once')
    # Read events and inventory
    try:
        if command in ('stack', 'plot'):
            events = None
        elif command != 'print' or objects[0] == 'events':
            if (not isinstance(events, obspy.Catalog) and
                    (not isinstance(events, list) or
                     (len(events) == 2 and
                      isinstance(events[0], basestring)))):
                if isinstance(events, basestring):
                    format_ = None
                else:
                    events, format_ = events
                events = obspy.read_events(events, format_)
        if command != 'print' or objects[0] == 'stations':
            if not isinstance(inventory, obspy.Inventory):
                if isinstance(inventory, basestring):
                    format_ = None
                else:
                    inventory, format_ = inventory
                inventory = obspy.read_inventory(inventory, format_)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        print('cannot read events or stations')
        return
    # Initialize get_waveforms
    if command == 'data':
        try:
            if get_waveforms is None:
                get_waveforms = init_data(
                    data, client_options=kw['client_options'], plugin=plugin)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            print('cannot initialize data')
            return
    # Print command
    if command == 'print':
        if objects[0] == 'events':
            print(events.__str__(True))
        elif objects[0] == 'stations':
            print(inventory)
        else:
            from rf.rfstream import RFStream
            stream = sum((read_rf(fname) for fname in objects), RFStream())
            print(stream.__str__(True))
        return
    # Select appropriate iterator
    if command == 'data':
        iter_ = iter_event_data(events, inventory, get_waveforms,
                                pbar=tqdm(), **kw['options'])
    elif command == 'plot-profile':
        iter_ = _iter_profile(path_in, format)
    else:
        yt = command == 'profile'
        iter_ = iter_event_processed_data(events, inventory, path_in, format,
                                          pbar=tqdm(), yield_traces=yt)
    # Run all commands
    if command == 'convert':
        for stream in iter_:
            write(stream, path_out, newformat)
    elif command == 'plot':
        for stream in iter_:
            channels = set(tr.stats.channel for tr in stream)
            for ch in channels:
                st2 = stream.select(channel=ch)
                fname = PLOT_FNAMES.format(root=path_out, **st2[0].stats)
                _create_dir(fname)
                st2.sort(['back_azimuth'])
                st2.plot_rf(fname, **kw['plot'])
    elif command == 'plot-profile':
        for stream in iter_:
            channels = set(tr.stats.channel for tr in stream)
            for ch in channels:
                st2 = stream.select(channel=ch)
                fname = PLOT_PROFILE_FNAMES.format(root=path_out,
                                                   **st2[0].stats)
                _create_dir(fname)
                st2.plot_profile(fname, **kw['plot_profile'])
    elif command == 'stack':
        for stream in iter_:
            stack = stream.stack()
            write(stack, path_out, format, type='stack')
    elif command == 'profile':
        from rf.profile import get_profile_boxes, profile
        boxx = get_profile_boxes(**kw['boxes'])
        prof = profile(iter_, boxx, **kw['profile'])
        write(prof, path_out, format, type='profile')
    else:
        commands = [command] + list(commands)
        for stream in iter_:
            for command in commands:
                if command == 'data':
                    pass
                elif command == 'calc':
                    stream.rf(**kw['rf'])
                elif command == 'moveout':
                    stream.moveout(**kw['moveout'])
                else:
                    raise NotImplementedError
            write(stream, path_out, format)
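# Hypothetical invocation sketch for run_commands, assuming the module-level
# configuration (DICT_OPTIONS, the file name templates, init_data) is set up
# as in the rest of this module. The file names, the 'fdsn' data plugin name
# and the output path are placeholders; 'data' downloads the raw waveforms and
# the additional 'calc' command computes receiver functions before writing:
#
#     run_commands('data', commands=('calc',),
#                  events='events.xml', inventory='stations.xml',
#                  data='fdsn', path_out='Prf', format='Q', phase='P')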