def test_rt_offset(self):
    """
    Test the 'offset' real-time processing algorithm: after appending
    all chunks, every sample should be shifted by ``offset``.
    """
    offset = 500
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('offset', offset=offset)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    # the mean absolute difference to the raw data equals the applied offset
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - self.data_trace.data
    # use assertAlmostEqual: assertAlmostEquals is a deprecated alias
    self.assertAlmostEqual(np.mean(np.abs(diff)), offset)
def test_rt_offset(self):
    """
    Test the 'offset' real-time processing algorithm: processing the
    chunks through an RtTrace shifts every sample by ``offset``.
    """
    offset = 500
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('offset', offset=offset)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    # compare against the unprocessed reference trace
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - self.data_trace.data
    # assertAlmostEquals is deprecated; use assertAlmostEqual
    self.assertAlmostEqual(np.mean(np.abs(diff)), offset)
def test_rt_neg_to_zero(self):
    """
    The 'neg_to_zero' algorithm clips negative samples to zero: the
    maximum must survive unchanged and the minimum must be exactly 0.0.
    """
    reference = self.data_trace.copy()
    expected_max = np.max(reference.data)
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('neg_to_zero')
    for chunk in self.traces:
        rt_trace.append(chunk, gap_overlap_check=True)
    self.assertEqual(expected_max, np.max(rt_trace.data))
    self.assertEqual(0.0, np.min(rt_trace.data))
def test_rt_neg_to_zero(self):
    """
    Clipping negative values to zero must preserve the maximum sample
    and force the minimum of the processed trace to exactly 0.0.
    """
    expected_max = np.max(self.data_trace.copy().data)
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('neg_to_zero')
    for chunk in self.traces:
        rt_trace.append(chunk, gap_overlap_check=True)
    observed_max = np.max(rt_trace.data)
    observed_min = np.min(rt_trace.data)
    self.assertEqual(expected_max, observed_max)
    self.assertEqual(0.0, observed_min)
def test_rt_variance(self):
    """
    Chunk-wise 'variance' processing must give the same output as
    processing the whole trace in a single append.
    """
    win = 10
    whole_trace = self.data_trace.copy()
    rt_whole = RtTrace()
    rt_chunked = RtTrace()
    for rtt in (rt_chunked, rt_whole):
        rtt.registerRtProcess('variance', win=win)
    for chunk in self.traces:
        rt_chunked.append(chunk, gap_overlap_check=True)
    rt_whole.append(whole_trace, gap_overlap_check=True)
    assert_array_almost_equal(rt_whole, rt_chunked)
def test_rt_scale(self):
    """
    Test the 'scale' real-time processing algorithm against scaling
    the whole trace at once by the same factor.
    """
    data_trace = self.data_trace.copy()
    fact = 1 / np.std(data_trace.data)
    data_trace.data *= fact
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('scale', factor=fact)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    # difference between chunk-wise and whole-trace scaling must vanish
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - data_trace.data
    # use assertAlmostEqual: assertAlmostEquals is a deprecated alias
    self.assertAlmostEqual(np.mean(np.abs(diff)), 0.0)
def test_rt_scale(self):
    """
    Scaling chunk-wise through an RtTrace must match scaling the whole
    reference trace by the same factor.
    """
    data_trace = self.data_trace.copy()
    fact = 1 / np.std(data_trace.data)
    data_trace.data *= fact
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('scale', factor=fact)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - data_trace.data
    # assertAlmostEquals is deprecated; use assertAlmostEqual
    self.assertAlmostEqual(np.mean(np.abs(diff)), 0.0)
def test_rt_variance(self):
    """
    The 'variance' algorithm must be independent of chunking: feeding
    the data in packets or in one go yields identical output.
    """
    win = 10
    full_trace = self.data_trace.copy()
    rt_single = RtTrace()
    rt_single.registerRtProcess('variance', win=win)
    rt_chunked = RtTrace()
    rt_chunked.registerRtProcess('variance', win=win)
    for chunk in self.traces:
        rt_chunked.append(chunk, gap_overlap_check=True)
    rt_single.append(full_trace, gap_overlap_check=True)
    assert_array_almost_equal(rt_single, rt_chunked)
def test_sw_kurtosis(self):
    """
    Test the 'sw_kurtosis' (sliding-window kurtosis) algorithm:
    chunk-wise processing must match single-shot processing.
    """
    win = 3.0
    data_trace = self.data_trace.copy()
    rt_trace = RtTrace()
    rt_single = RtTrace()
    rt_trace.registerRtProcess('sw_kurtosis', win=win)
    rt_single.registerRtProcess('sw_kurtosis', win=win)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    rt_single.append(data_trace)
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - rt_single.data
    # use assertAlmostEqual: assertAlmostEquals is a deprecated alias
    self.assertAlmostEqual(np.mean(np.abs(diff)), 0.0)
def test_sw_kurtosis(self):
    """
    Sliding-window kurtosis must give the same result whether the data
    arrive in packets or as a single trace.
    """
    win = 3.0
    data_trace = self.data_trace.copy()
    rt_trace = RtTrace()
    rt_single = RtTrace()
    rt_trace.registerRtProcess('sw_kurtosis', win=win)
    rt_single.registerRtProcess('sw_kurtosis', win=win)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    rt_single.append(data_trace)
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - rt_single.data
    # assertAlmostEquals is deprecated; use assertAlmostEqual
    self.assertAlmostEqual(np.mean(np.abs(diff)), 0.0)
def test_rt_mean(self):
    """
    Chunk-wise 'mean' processing must match single-shot processing;
    subtracting the result from the raw data leaves a ~zero-mean trace.
    """
    win = 0.05
    full_trace = self.data_trace.copy()
    rt_chunked = RtTrace()
    rt_single = RtTrace()
    rt_chunked.registerRtProcess('mean', win=win)
    rt_single.registerRtProcess('mean', win=win)
    for chunk in self.traces:
        rt_chunked.append(chunk, gap_overlap_check=True)
    rt_single.append(full_trace, gap_overlap_check=True)
    residual = self.data_trace.copy()
    residual.data = residual.data - rt_chunked.data
    assert_array_almost_equal(rt_single, rt_chunked)
    # only checked to 0 decimal places
    self.assertAlmostEqual(np.mean(residual.data), 0.0, 0)
def test_rt_mean(self):
    """
    The 'mean' algorithm is chunking-independent, and removing its
    output from the raw data yields a roughly zero-mean residual.
    """
    win = 0.05
    whole_trace = self.data_trace.copy()
    rt_single = RtTrace()
    rt_trace = RtTrace()
    for rtt in (rt_trace, rt_single):
        rtt.registerRtProcess('mean', win=win)
    for chunk in self.traces:
        rt_trace.append(chunk, gap_overlap_check=True)
    rt_single.append(whole_trace, gap_overlap_check=True)
    demeaned = self.data_trace.copy()
    demeaned.data = demeaned.data - rt_trace.data
    assert_array_almost_equal(rt_single, rt_trace)
    # loose tolerance: 0 decimal places
    self.assertAlmostEqual(np.mean(demeaned.data), 0.0, 0)
def test_rt_kurtosis_dec(self):
    """
    Kurtosis of a decimated trace should peak at (nearly) the same
    amplitude as a decimated kurtosis of the full-rate trace.
    """
    win = 5.0
    data_trace = self.data_trace_filt.copy()
    data_trace_dec = self.data_trace_filt.copy()
    # no need to filter as we're using a pre-filtered trace
    data_trace_dec.decimate(5, no_filter=True)
    rt_trace = RtTrace()
    rt_dec = RtTrace()
    rt_trace.registerRtProcess('kurtosis', win=win)
    rt_dec.registerRtProcess('kurtosis', win=win)
    rt_trace.append(data_trace, gap_overlap_check=True)
    rt_dec.append(data_trace_dec, gap_overlap_check=True)
    newtr = rt_trace.copy()
    newtr.decimate(5, no_filter=True)
    # no sample-by-sample comparison here: only the relative difference
    # of the two maxima is checked, to 2 decimal places
    diff = (np.max(rt_dec.data) - np.max(newtr.data)) / np.max(rt_dec.data)
    # use assertAlmostEqual: assertAlmostEquals is a deprecated alias
    self.assertAlmostEqual(np.abs(diff), 0.0, 2)
def test_rt_kurtosis_dec(self):
    """
    Decimating before or after the 'kurtosis' processing should give
    peaks of nearly the same amplitude.
    """
    win = 5.0
    data_trace = self.data_trace_filt.copy()
    data_trace_dec = self.data_trace_filt.copy()
    # no need to filter as we're using a pre-filtered trace
    data_trace_dec.decimate(5, no_filter=True)
    rt_trace = RtTrace()
    rt_dec = RtTrace()
    rt_trace.registerRtProcess('kurtosis', win=win)
    rt_dec.registerRtProcess('kurtosis', win=win)
    rt_trace.append(data_trace, gap_overlap_check=True)
    rt_dec.append(data_trace_dec, gap_overlap_check=True)
    newtr = rt_trace.copy()
    newtr.decimate(5, no_filter=True)
    # only the relative peak difference is asserted (2 places), not the
    # full arrays
    diff = (np.max(rt_dec.data) - np.max(newtr.data)) / np.max(rt_dec.data)
    # assertAlmostEquals is deprecated; use assertAlmostEqual
    self.assertAlmostEqual(np.abs(diff), 0.0, 2)
def test_rt_kurtosis(self):
    """
    Chunk-wise 'scale' + 'kurtosis' processing must reproduce the
    reference rec_kurtosis applied to the whole scaled trace.
    """
    win = 3.0
    data_trace = self.data_trace.copy()
    sigma = float(np.std(data_trace.data))
    fact = 1 / sigma
    dt = data_trace.stats.delta
    # C1 is the recursion constant used by rec_kurtosis
    C1 = dt / float(win)
    x = data_trace.data
    ktrace = data_trace.copy()
    ktrace.data = rec_kurtosis(x * fact, C1)
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('scale', factor=fact)
    rt_trace.registerRtProcess('kurtosis', win=win)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - ktrace.data
    # use assertAlmostEqual: assertAlmostEquals is a deprecated alias
    self.assertAlmostEqual(np.mean(np.abs(diff)), 0.0)
def test_registerRtProcess(self):
    """
    Testing registerRtProcess method.
    """
    rtr = RtTrace()
    # 1 - function call
    rtr.registerRtProcess(np.abs)
    self.assertEqual(rtr.processing, [(np.abs, {}, None)])
    # 2 - predefined RT processing algorithm
    rtr.registerRtProcess('integrate', test=1, muh='maeh')
    self.assertEqual(rtr.processing[1][0], 'integrate')
    self.assertEqual(rtr.processing[1][1], {'test': 1, 'muh': 'maeh'})
    self.assertTrue(isinstance(rtr.processing[1][2][0], RtMemory))
    # 3 - contained name of predefined RT processing algorithm
    for idx, abbreviation in enumerate(('in', 'integ', 'integr'), 2):
        rtr.registerRtProcess(abbreviation)
        self.assertEqual(rtr.processing[idx][0], 'integrate')
    # 4 - unknown functions
    for bad_name in ('integrate2', 'xyz'):
        self.assertRaises(NotImplementedError, rtr.registerRtProcess,
                          bad_name)
    # 5 - module instead of function
    self.assertRaises(NotImplementedError, rtr.registerRtProcess, np)
    # check number of all processing steps within RtTrace
    self.assertEqual(len(rtr.processing), 5)
    # check rtr.stats.processing
    self.assertEqual(len(rtr.stats.processing), 5)
    self.assertTrue(rtr.stats.processing[0].startswith("realtime_process"))
    self.assertTrue('absolute' in rtr.stats.processing[0])
    for i in range(1, 5):
        self.assertTrue('integrate' in rtr.stats.processing[i])
    # check kwargs
    self.assertTrue("maeh" in rtr.stats.processing[1])
def test_rt_kurtosis(self):
    """
    The real-time 'scale' + 'kurtosis' chain must agree with the
    reference rec_kurtosis run on the whole scaled trace.
    """
    win = 3.0
    data_trace = self.data_trace.copy()
    sigma = float(np.std(data_trace.data))
    fact = 1 / sigma
    dt = data_trace.stats.delta
    C1 = dt / float(win)
    x = data_trace.data
    ktrace = data_trace.copy()
    ktrace.data = rec_kurtosis(x * fact, C1)
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('scale', factor=fact)
    rt_trace.registerRtProcess('kurtosis', win=win)
    for tr in self.traces:
        rt_trace.append(tr, gap_overlap_check=True)
    diff = self.data_trace.copy()
    diff.data = rt_trace.data - ktrace.data
    # assertAlmostEquals is deprecated; use assertAlmostEqual
    self.assertAlmostEqual(np.mean(np.abs(diff)), 0.0)
def test_kwin_bank(self):
    """
    Smoke test: run a bank of kurtosis windows in parallel and
    post-process the point-wise maximum (no numerical assertions).
    """
    win_list = [1.0, 3.0, 9.0]
    data_trace = self.data_trace.copy()
    sigma = float(np.std(data_trace.data))
    fact = 1 / sigma
    # One RtTrace for processing before the kurtosis
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('scale', factor=fact)
    # One RtTrace per kurtosis window
    kurt_traces = []
    for win in win_list:
        rtt = RtTrace()
        rtt.registerRtProcess('kurtosis', win=win)
        kurt_traces.append(rtt)
    # One RtTrace for post-processing the max kurtosis window
    max_kurt = RtTrace()
    max_kurt.registerRtProcess('differentiate')
    max_kurt.registerRtProcess('neg_to_zero')
    for tr in self.traces:
        # copy of the packet that will receive the stacked maximum
        kurt_tr = tr.copy()
        # initial processing, then fan out to the window bank
        proc_trace = rt_trace.append(tr, gap_overlap_check=True)
        kurt_output = [kt.append(proc_trace, gap_overlap_check=True).data
                       for kt in kurt_traces]
        # stack the bank output and keep the point-wise maximum
        kurt_tr.data = np.max(np.vstack(tuple(kurt_output)), axis=0)
        # pass the maximum on for post-processing
        max_kurt.append(kurt_tr)
def test_kwin_bank(self):
    """
    Smoke test for a bank of parallel kurtosis windows whose
    point-wise maximum is then post-processed (no assertions made).
    """
    win_list = [1.0, 3.0, 9.0]
    data_trace = self.data_trace.copy()
    sigma = float(np.std(data_trace.data))
    fact = 1 / sigma
    # pre-processing trace (scaling) feeding the kurtosis bank
    pre_proc = RtTrace()
    pre_proc.registerRtProcess('scale', factor=fact)
    # one RtTrace per kurtosis window
    kurt_traces = []
    for win in win_list:
        kurt_rt = RtTrace()
        kurt_rt.registerRtProcess('kurtosis', win=win)
        kurt_traces.append(kurt_rt)
    # post-processing of the max-kurtosis stream
    max_kurt = RtTrace()
    max_kurt.registerRtProcess('differentiate')
    max_kurt.registerRtProcess('neg_to_zero')
    for tr in self.traces:
        # keep a copy around to receive the stacked maximum
        kurt_tr = tr.copy()
        proc_trace = pre_proc.append(tr, gap_overlap_check=True)
        kurt_output = []
        for kurt_rt in kurt_traces:
            ko = kurt_rt.append(proc_trace, gap_overlap_check=True)
            kurt_output.append(ko.data)
        # stack the window bank and keep the point-wise maximum
        kurt_tr.data = np.max(np.vstack(tuple(kurt_output)), axis=0)
        max_kurt.append(kurt_tr)
def test_copy(self):
    """
    Testing copy of RtTrace object: copying must work no matter which
    kind of processing function has been registered so far.
    """
    rtr = RtTrace()
    registrations = [
        # nothing registered yet
        None,
        # predefined function by name
        lambda: rtr.registerRtProcess('integrate', test=1, muh='maeh'),
        # ObsPy function call
        lambda: rtr.registerRtProcess(filter.bandpass, freqmin=0,
                                      freqmax=1, df=0.1),
        # NumPy function call
        lambda: rtr.registerRtProcess(np.square),
    ]
    for register in registrations:
        if register is not None:
            register()
        rtr.copy()
def test_missingOrWrongArgumentInRtProcess(self):
    """
    Tests handling of missing/wrong arguments.
    """
    trace = Trace(np.arange(100))

    def _expect_append_failure(process, **options):
        # registering succeeds; the TypeError surfaces on append()
        rtt = RtTrace()
        rtt.registerRtProcess(process, **options)
        self.assertRaises(TypeError, rtt.append, trace)

    # 1 - function 'scale' needs no additional arguments
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('scale')
    rt_trace.append(trace)
    # adding arbitrary arguments should fail
    _expect_append_failure('scale', muh='maeh')
    # 2 - function 'tauc' has one required argument
    rt_trace = RtTrace()
    rt_trace.registerRtProcess('tauc', width=10)
    rt_trace.append(trace)
    # wrong argument should fail
    _expect_append_failure('tauc', xyz='xyz')
    # missing required argument 'width' should fail
    _expect_append_failure('tauc')
    # adding arbitrary arguments should fail
    _expect_append_failure('tauc', width=20, notexistingoption=True)
class RtMigrator(object):
    """
    Class of objects for real-time migration.

    Holds one observation RtTrace per station, one time-shifted RtTrace
    per (point, station) pair, one stack RtTrace per grid point and
    four output RtTraces (stack maximum and its x, y, z coordinates).
    """
    # attributes (class-level defaults, replaced per instance in __init__)
    x = np.array([])                 # grid x coordinates
    y = np.array([])                 # grid y coordinates
    z = np.array([])                 # grid z coordinates
    ttimes_matrix = np.empty((0, 0), dtype=float)  # travel times, nsta x npts
    npts = 0                         # number of grid points
    nsta = 0                         # number of stations
    sta_list = []                    # station names, index-aligned with rows
    obs_rt_list = []                 # one observation RtTrace per station
    point_rt_list = []               # shifted RtTraces, npts x nsta
    stack_list = []                  # one stack RtTrace per point
    max_out = None                   # output: stack maximum
    x_out = None                     # output: x of the maximum
    y_out = None                     # output: y of the maximum
    z_out = None                     # output: z of the maximum
    last_common_end_stack = []       # per-point last processed end-time
    last_common_end_max = None       # last processed end-time for the max
    dt = 1.0                         # sampling interval expected from data
    filter_shift = 0.0               # time shift introduced by the filter

    def __init__(self, waveloc_options):
        """
        Initialize from a set of travel-times as hdf5 files
        """
        wo = waveloc_options
        # initialize the travel-times #############################
        ttimes_fnames = glob.glob(wo.ttimes_glob)
        # get basic lengths
        f = h5py.File(ttimes_fnames[0], 'r')
        # copy the x, y, z data over
        self.x = np.array(f['x'][:])
        self.y = np.array(f['y'][:])
        self.z = np.array(f['z'][:])
        f.close()
        # read the files
        ttimes_list = []
        self.sta_list = []
        for fname in ttimes_fnames:
            f = h5py.File(fname, 'r')
            # update the list of ttimes
            ttimes_list.append(np.array(f['ttimes']))
            sta = f['ttimes'].attrs['station']
            f.close()
            # update the dictionary of station names
            self.sta_list.append(sta)
        # stack the ttimes into a numpy array
        self.ttimes_matrix = np.vstack(ttimes_list)
        (self.nsta, self.npts) = self.ttimes_matrix.shape
        # initialize the RtTrace(s) ##########################
        max_length = wo.opdict['max_length']
        self.safety_margin = wo.opdict['safety_margin']
        self.dt = wo.opdict['dt']
        # need a RtTrace per station
        self.obs_rt_list = [RtTrace() for sta in self.sta_list]
        # register pre-processing
        self._register_preprocessing(wo)
        # need nsta streams for each point we test (nsta x npts)
        # for shifted waveforms
        self.point_rt_list = [[RtTrace(max_length=max_length) \
            for ista in xrange(self.nsta)] for ip in xrange(self.npts)]
        # register processing of point-streams here
        for sta_list in self.point_rt_list:
            for rtt in sta_list:
                # This is where we would scale for distance (given
                # pre-calculated distances from each point to every station)
                rtt.registerRtProcess('scale', factor=1.0)
        # need npts streams to store the point-stacks
        self.stack_list = [RtTrace(max_length=max_length)
                           for ip in xrange(self.npts)]
        # register stack procesing here
        for rtt in self.stack_list:
            # This is where we would add or lower weights if we wanted to
            rtt.registerRtProcess('scale', factor=1.0)
        # need 4 output streams (max, x, y, z)
        self.max_out = RtTrace()
        self.x_out = RtTrace()
        self.y_out = RtTrace()
        self.z_out = RtTrace()
        if not wo.is_syn:
            self.max_out.registerRtProcess('boxcar', width=50)
        # need a list of common start-times
        self.last_common_end_stack = [UTCDateTime(1970, 1, 1)
                                      for i in xrange(self.npts)]
        self.last_common_end_max = UTCDateTime(1970, 1, 1)

    def _register_preprocessing(self, waveloc_options):
        """
        Register the pre-processing chain on each observation RtTrace.
        """
        wo = waveloc_options
        # if this is a synthetic
        if wo.is_syn:
            # do dummy processing only
            for rtt in self.obs_rt_list:
                rtt.registerRtProcess('scale', factor=1.0)
        else:
            # get gaussian filtering parameters
            f0, sigma, dt = wo.gauss_filter
            gauss, self.filter_shift = gaussian_filter(f0, sigma, dt)
            # get kwin
            # for now just use one window
            kwin = wo.opdict['kwin']
            # register pre-processing of data here
            for rtt in self.obs_rt_list:
                rtt.registerRtProcess('convolve', conv_signal=gauss)
                rtt.registerRtProcess('sw_kurtosis', win=kwin)
                rtt.registerRtProcess('boxcar', width=50)
                rtt.registerRtProcess('differentiate')
                rtt.registerRtProcess('neg_to_zero')

    def updateData(self, tr_list):
        """
        Adds a list of traces (one per station) to the system
        """
        # timing accumulators, reported at the end of the call
        t_copy = 0.0
        t_append = 0.0
        t_append_proc = 0.0
        t0_update = time.time()
        for tr in tr_list:
            if (self.dt != tr.stats.delta):
                msg = 'Value of dt from options file %.2f does not match dt from data %2f' % (self.dt, tr.stats.delta)
                # NOTE(review): msg is built but never used — this should
                # probably be raise ValueError(msg); confirm and fix
                raise ValueError()
            # pre-correct for filter_shift
            #tr.stats.starttime -= np.round(self.filter_shift/self.dt) * self.dt
            tr.stats.starttime -= self.filter_shift
            sta = tr.stats.station
            ista = self.sta_list.index(sta)
            # make dtype of data float if it is not already
            tr.data = tr.data.astype(np.float32)
            t0 = time.time()
            pp_data = self.obs_rt_list[ista].append(tr,
                                                    gap_overlap_check=True)
            t_append_proc += time.time() - t0
            # loop over points
            for ip in xrange(self.npts):
                # do time shift and append
                t0 = time.time()
                pp_data_tmp = pp_data.copy()
                t_copy += time.time() - t0
                # shift by the travel time, rounded to whole samples
                pp_data_tmp.stats.starttime -= np.round(
                    self.ttimes_matrix[ista, ip] / self.dt) * self.dt
                t0 = time.time()
                self.point_rt_list[ip][ista].append(pp_data_tmp,
                                                    gap_overlap_check=True)
                t_append += time.time() - t0
        print "In updateData : %.2f s in process and %.2f s in data copy and %.2f s in append and a total of %.2f s" % (t_append_proc, t_copy, t_append, time.time() - t0_update)

    def updateStacks(self):
        """
        Update the per-point stack traces from the shifted point streams.
        """
        npts = self.npts
        for ip in xrange(npts):
            self._updateStack(ip)

    def _updateStack(self, ip):
        """
        Stack the time-aligned station streams for point ``ip`` and
        append the result to that point's stack RtTrace.
        """
        UTCDateTime.DEFAULT_PRECISION = 2
        nsta = self.nsta
        # get common start-time for this point
        common_start = max([self.point_rt_list[ip][ista].stats.starttime \
            for ista in xrange(nsta)])
        common_start = max(common_start, self.last_common_end_stack[ip])
        # get list of stations for which the end-time is compatible
        # with the common_start time and the safety buffer
        ista_ok = [ista for ista in xrange(nsta)
                   if (self.point_rt_list[ip][ista].stats.endtime -
                       common_start) > self.safety_margin]
        # get common end-time
        common_end = min([self.point_rt_list[ip][ista].stats.endtime
                          for ista in ista_ok])
        self.last_common_end_stack[ip] = common_end + self.dt
        # stack
        c_list = []
        for ista in ista_ok:
            tr = self.point_rt_list[ip][ista].copy()
            tr.trim(common_start, common_end)
            c_list.append(np.array(tr.data[:]))
        tr_common = np.vstack(c_list)
        # prepare trace for passing up
        stack_data = np.sum(tr_common, axis=0)
        stats = {'station': 'STACK', 'npts': len(stack_data),
                 'delta': self.dt, 'starttime': common_start}
        tr = Trace(data=stack_data, header=stats)
        #import pdb; pdb.set_trace()
        # append to appropriate stack_list
        self.stack_list[ip].append(tr, gap_overlap_check=True)

    def updateMax(self):
        """
        Extract the maximum (and its x, y, z coordinates) across all
        point stacks and append it to the four output RtTraces.
        """
        npts = self.npts
        nsta = self.nsta
        # now extract maximum etc from stacks
        # get common start-time for this point
        common_start = max([self.stack_list[ip].stats.starttime \
            for ip in xrange(npts)])
        common_start = max(common_start, self.last_common_end_max)
        # get list of points for which the end-time is compatible
        # with the common_start time and the safety buffer
        ip_ok = [ip for ip in xrange(npts)
                 if (self.stack_list[ip].stats.endtime - common_start) >
                 self.safety_margin]
        common_end = min([self.stack_list[ip].stats.endtime for ip in ip_ok])
        self.last_common_end_max = common_end + self.dt
        # stack
        c_list = []
        for ip in ip_ok:
            tr = self.stack_list[ip].copy()
            tr.trim(common_start, common_end)
            c_list.append(tr.data)
        tr_common = np.vstack(c_list)
        # get maximum and the corresponding point
        max_data = np.max(tr_common, axis=0)
        argmax_data = np.argmax(tr_common, axis=0)
        # prepare traces for passing up
        # max
        stats = {'station': 'Max', 'npts': len(max_data), 'delta': self.dt,
                 'starttime': common_start}
        tr_max = Trace(data=max_data, header=stats)
        self.max_out.append(tr_max, gap_overlap_check=True)
        # x coordinate
        stats['station'] = 'xMax'
        tr_x = Trace(data=self.x[argmax_data], header=stats)
        self.x_out.append(tr_x, gap_overlap_check=True)
        # y coordinate
        stats['station'] = 'yMax'
        tr_y = Trace(data=self.y[argmax_data], header=stats)
        self.y_out.append(tr_y, gap_overlap_check=True)
        # z coordinate
        stats['station'] = 'zMax'
        tr_z = Trace(data=self.z[argmax_data], header=stats)
        self.z_out.append(tr_z, gap_overlap_check=True)
class RealTimeSignalTestCase(unittest.TestCase):
    """
    The obspy.realtime.signal test suite.

    Each test processes the reference trace twice — once in one go
    ("manual") and once packet-wise through an RtTrace — and compares
    the two results.
    """
    def __init__(self, *args, **kwargs):
        super(RealTimeSignalTestCase, self).__init__(*args, **kwargs)
        # read test data as float64
        self.orig_trace = read(os.path.join(os.path.dirname(__file__),
                               'data', 'II.TLY.BHZ.SAC'), dtype='f8')[0]
        # make really sure test data is float64
        self.orig_trace.data = np.require(self.orig_trace.data, 'f8')
        # split into NUM_PACKETS chunks to simulate real-time packets
        self.orig_trace_chunks = self.orig_trace / NUM_PACKETS

    def setUp(self):
        # clear results
        self.filt_trace_data = None
        self.rt_trace = None
        self.rt_appended_traces = []

    def tearDown(self):
        # use results for debug plots if enabled
        if PLOT_TRACES and self.filt_trace_data is not None and \
           self.rt_trace is not None and self.rt_appended_traces:
            self._plotResults()

    def test_square(self):
        """
        Testing np.square function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = np.square(trace)
        # filtering real time
        process_list = [(np.square, {})]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_integrate(self):
        """
        Testing integrate function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = signal.integrate(trace)
        # filtering real time
        process_list = [('integrate', {})]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_differentiate(self):
        """
        Testing differentiate function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = signal.differentiate(trace)
        # filtering real time
        process_list = [('differentiate', {})]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_boxcar(self):
        """
        Testing boxcar function.
        """
        trace = self.orig_trace.copy()
        options = {'width': 500}
        # filtering manual
        self.filt_trace_data = signal.boxcar(trace, **options)
        # filtering real time
        process_list = [('boxcar', options)]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertAlmostEqual(peak, 566974.214, 3)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_scale(self):
        """
        Testing scale function.
        """
        trace = self.orig_trace.copy()
        options = {'factor': 1000}
        # filtering manual
        self.filt_trace_data = signal.scale(trace, **options)
        # filtering real time
        process_list = [('scale', options)]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertEqual(peak, 1045237000.0)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_offset(self):
        """
        Testing offset function.
        """
        trace = self.orig_trace.copy()
        options = {'offset': 500}
        # filtering manual
        self.filt_trace_data = signal.offset(trace, **options)
        # filtering real time
        process_list = [('offset', options)]
        self._runRtProcess(process_list)
        # check results: mean difference to raw data equals the offset
        diff = self.rt_trace.data - self.orig_trace.data
        self.assertEqual(np.mean(diff), 500)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_kurtosis(self):
        """
        Testing kurtosis function.
        """
        trace = self.orig_trace.copy()
        options = {'win': 5}
        # filtering manual
        self.filt_trace_data = signal.kurtosis(trace, **options)
        # filtering real time
        process_list = [('kurtosis', options)]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_abs(self):
        """
        Testing np.abs function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = np.abs(trace)
        # filtering real time
        process_list = [(np.abs, {})]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertEqual(peak, 1045237)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_tauc(self):
        """
        Testing tauc function.
        """
        trace = self.orig_trace.copy()
        options = {'width': 60}
        # filtering manual
        self.filt_trace_data = signal.tauc(trace, **options)
        # filtering real time
        process_list = [('tauc', options)]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertAlmostEqual(peak, 114.302, 3)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_mwpIntegral(self):
        """
        Testing mwpIntegral functions.
        """
        trace = self.orig_trace.copy()
        # ref_time is the (known) P-arrival of the test event
        options = {
            'mem_time': 240,
            'ref_time': trace.stats.starttime + 301.506,
            'max_time': 120,
            'gain': 1.610210e+09
        }
        # filtering manual
        self.filt_trace_data = signal.mwpIntegral(self.orig_trace.copy(),
                                                  **options)
        # filtering real time
        process_list = [('mwpIntegral', options)]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_mwp(self):
        """
        Testing Mwp calculation using two processing functions.
        """
        trace = self.orig_trace.copy()
        epicentral_distance = 30.0855
        options = {
            'mem_time': 240,
            'ref_time': trace.stats.starttime + 301.506,
            'max_time': 120,
            'gain': 1.610210e+09
        }
        # filtering manual
        trace.data = signal.integrate(trace)
        self.filt_trace_data = signal.mwpIntegral(trace, **options)
        # filtering real time
        process_list = [('integrate', {}), ('mwpIntegral', options)]
        self._runRtProcess(process_list)
        # check results: magnitude from the peak of the processed trace
        peak = np.amax(np.abs(self.rt_trace.data))
        mwp = signal.calculateMwpMag(peak, epicentral_distance)
        self.assertAlmostEqual(mwp, 8.78902911791, 5)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_combined(self):
        """
        Testing combining integrate and differentiate functions.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        trace.data = signal.integrate(trace)
        self.filt_trace_data = signal.differentiate(trace)
        # filtering real time (abbreviated names resolve to the same
        # predefined algorithms)
        process_list = [('int', {}), ('diff', {})]
        self._runRtProcess(process_list)
        # check results: differentiating the integral recovers the data
        # (first sample excluded)
        trace = self.orig_trace.copy()
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)
        np.testing.assert_almost_equal(trace.data[1:],
                                       self.rt_trace.data[1:])
        np.testing.assert_almost_equal(trace.data[1:],
                                       self.filt_trace_data[1:])

    def _runRtProcess(self, process_list, max_length=None):
        """
        Helper function to create a RtTrace, register all given process
        functions and run the real time processing.
        """
        # assemble real time trace
        self.rt_trace = RtTrace(max_length=max_length)
        for (process, options) in process_list:
            self.rt_trace.registerRtProcess(process, **options)
        # append packet data to RtTrace
        self.rt_appended_traces = []
        for trace in self.orig_trace_chunks:
            # process single trace
            result = self.rt_trace.append(trace, gap_overlap_check=True)
            # add to list of appended traces
            self.rt_appended_traces.append(result)

    def _plotResults(self):
        """
        Plots original, filtered original and real time processed traces
        into a single plot.
        """
        # plot only if test is started manually
        if __name__ != '__main__':
            return
        # create empty stream
        st = Stream()
        st.label = self._testMethodName
        # original trace
        self.orig_trace.label = "Original Trace"
        st += self.orig_trace
        # use header information of original trace with filtered trace data
        tr = self.orig_trace.copy()
        tr.data = self.filt_trace_data
        tr.label = "Filtered original Trace"
        st += tr
        # real processed chunks
        for i, tr in enumerate(self.rt_appended_traces):
            tr.label = "RT Chunk %02d" % (i + 1)
            st += tr
        # real time processed trace
        self.rt_trace.label = "RT Trace"
        st += self.rt_trace
        st.plot(automerge=False, color='blue', equal_scale=False)
class RealTimeSignalTestCase(unittest.TestCase):
    """
    The obspy.realtime.signal test suite.

    Each test processes the reference trace twice — once in one go
    ("manual") and once packet-wise through an RtTrace — and compares
    the two results.
    """
    def __init__(self, *args, **kwargs):
        super(RealTimeSignalTestCase, self).__init__(*args, **kwargs)
        # read test data as float64
        self.orig_trace = read(os.path.join(os.path.dirname(__file__),
                               'data', 'II.TLY.BHZ.SAC'), dtype='f8')[0]
        # make really sure test data is float64
        self.orig_trace.data = np.require(self.orig_trace.data, 'f8')
        # split into NUM_PACKETS chunks to simulate real-time packets
        self.orig_trace_chunks = self.orig_trace / NUM_PACKETS

    def setUp(self):
        # clear results
        self.filt_trace_data = None
        self.rt_trace = None
        self.rt_appended_traces = []

    def tearDown(self):
        # use results for debug plots if enabled
        if PLOT_TRACES and self.filt_trace_data is not None and \
           self.rt_trace is not None and self.rt_appended_traces:
            self._plotResults()

    def test_square(self):
        """
        Testing np.square function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = np.square(trace)
        # filtering real time
        process_list = [(np.square, {})]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_integrate(self):
        """
        Testing integrate function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = signal.integrate(trace)
        # filtering real time
        process_list = [('integrate', {})]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_differentiate(self):
        """
        Testing differentiate function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = signal.differentiate(trace)
        # filtering real time
        process_list = [('differentiate', {})]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_boxcar(self):
        """
        Testing boxcar function.
        """
        trace = self.orig_trace.copy()
        options = {'width': 500}
        # filtering manual
        self.filt_trace_data = signal.boxcar(trace, **options)
        # filtering real time
        process_list = [('boxcar', options)]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertAlmostEqual(peak, 566974.214, 3)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_scale(self):
        """
        Testing scale function.
        """
        trace = self.orig_trace.copy()
        options = {'factor': 1000}
        # filtering manual
        self.filt_trace_data = signal.scale(trace, **options)
        # filtering real time
        process_list = [('scale', options)]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertEqual(peak, 1045237000.0)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_offset(self):
        """
        Testing offset function.
        """
        trace = self.orig_trace.copy()
        options = {'offset': 500}
        # filtering manual
        self.filt_trace_data = signal.offset(trace, **options)
        # filtering real time
        process_list = [('offset', options)]
        self._runRtProcess(process_list)
        # check results: mean difference to raw data equals the offset
        diff = self.rt_trace.data - self.orig_trace.data
        self.assertEqual(np.mean(diff), 500)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_kurtosis(self):
        """
        Testing kurtosis function.
        """
        trace = self.orig_trace.copy()
        options = {'win': 5}
        # filtering manual
        self.filt_trace_data = signal.kurtosis(trace, **options)
        # filtering real time
        process_list = [('kurtosis', options)]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_abs(self):
        """
        Testing np.abs function.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        self.filt_trace_data = np.abs(trace)
        # filtering real time
        process_list = [(np.abs, {})]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertEqual(peak, 1045237)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_tauc(self):
        """
        Testing tauc function.
        """
        trace = self.orig_trace.copy()
        options = {'width': 60}
        # filtering manual
        self.filt_trace_data = signal.tauc(trace, **options)
        # filtering real time
        process_list = [('tauc', options)]
        self._runRtProcess(process_list)
        # check results against a known reference peak value
        peak = np.amax(np.abs(self.rt_trace.data))
        self.assertAlmostEqual(peak, 114.302, 3)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_mwpIntegral(self):
        """
        Testing mwpIntegral functions.
        """
        trace = self.orig_trace.copy()
        # ref_time is the (known) P-arrival of the test event
        options = {'mem_time': 240,
                   'ref_time': trace.stats.starttime + 301.506,
                   'max_time': 120,
                   'gain': 1.610210e+09}
        # filtering manual
        self.filt_trace_data = signal.mwpIntegral(self.orig_trace.copy(),
                                                  **options)
        # filtering real time
        process_list = [('mwpIntegral', options)]
        self._runRtProcess(process_list)
        # check results
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_mwp(self):
        """
        Testing Mwp calculation using two processing functions.
        """
        trace = self.orig_trace.copy()
        epicentral_distance = 30.0855
        options = {'mem_time': 240,
                   'ref_time': trace.stats.starttime + 301.506,
                   'max_time': 120,
                   'gain': 1.610210e+09}
        # filtering manual
        trace.data = signal.integrate(trace)
        self.filt_trace_data = signal.mwpIntegral(trace, **options)
        # filtering real time
        process_list = [('integrate', {}), ('mwpIntegral', options)]
        self._runRtProcess(process_list)
        # check results: magnitude from the peak of the processed trace
        peak = np.amax(np.abs(self.rt_trace.data))
        mwp = signal.calculateMwpMag(peak, epicentral_distance)
        self.assertAlmostEqual(mwp, 8.78902911791, 5)
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)

    def test_combined(self):
        """
        Testing combining integrate and differentiate functions.
        """
        trace = self.orig_trace.copy()
        # filtering manual
        trace.data = signal.integrate(trace)
        self.filt_trace_data = signal.differentiate(trace)
        # filtering real time (abbreviated names resolve to the same
        # predefined algorithms)
        process_list = [('int', {}), ('diff', {})]
        self._runRtProcess(process_list)
        # check results: differentiating the integral recovers the data
        # (first sample excluded)
        trace = self.orig_trace.copy()
        np.testing.assert_almost_equal(self.filt_trace_data,
                                       self.rt_trace.data)
        np.testing.assert_almost_equal(trace.data[1:],
                                       self.rt_trace.data[1:])
        np.testing.assert_almost_equal(trace.data[1:],
                                       self.filt_trace_data[1:])

    def _runRtProcess(self, process_list, max_length=None):
        """
        Helper function to create a RtTrace, register all given process
        functions and run the real time processing.
        """
        # assemble real time trace
        self.rt_trace = RtTrace(max_length=max_length)
        for (process, options) in process_list:
            self.rt_trace.registerRtProcess(process, **options)
        # append packet data to RtTrace
        self.rt_appended_traces = []
        for trace in self.orig_trace_chunks:
            # process single trace
            result = self.rt_trace.append(trace, gap_overlap_check=True)
            # add to list of appended traces
            self.rt_appended_traces.append(result)

    def _plotResults(self):
        """
        Plots original, filtered original and real time processed traces
        into a single plot.
        """
        # plot only if test is started manually
        if __name__ != '__main__':
            return
        # create empty stream
        st = Stream()
        st.label = self._testMethodName
        # original trace
        self.orig_trace.label = "Original Trace"
        st += self.orig_trace
        # use header information of original trace with filtered trace data
        tr = self.orig_trace.copy()
        tr.data = self.filt_trace_data
        tr.label = "Filtered original Trace"
        st += tr
        # real processed chunks
        for i, tr in enumerate(self.rt_appended_traces):
            tr.label = "RT Chunk %02d" % (i + 1)
            st += tr
        # real time processed trace
        self.rt_trace.label = "RT Trace"
        st += self.rt_trace
        st.plot(automerge=False, color='blue', equal_scale=False)
class RtMigrator(object): """ Class of objects for real-time migration. """ # attributes x=np.array([]) y=np.array([]) z=np.array([]) ttimes_matrix=np.empty((0,0), dtype=float) npts=0 nsta=0 sta_list=[] obs_rt_list=[] point_rt_list=[] stack_list=[] max_out=None x_out=None y_out=None z_out=None last_common_end_stack=[] last_common_end_max=None dt=1.0 filter_shift=0.0 def __init__(self,waveloc_options): """ Initialize from a set of travel-times as hdf5 files """ wo=waveloc_options # initialize the travel-times ############################# ttimes_fnames=glob.glob(wo.ttimes_glob) # get basic lengths f=h5py.File(ttimes_fnames[0],'r') # copy the x, y, z data over self.x = np.array(f['x'][:]) self.y = np.array(f['y'][:]) self.z = np.array(f['z'][:]) f.close() # read the files ttimes_list = [] self.sta_list=[] for fname in ttimes_fnames: f=h5py.File(fname,'r') # update the list of ttimes ttimes_list.append(np.array(f['ttimes'])) sta=f['ttimes'].attrs['station'] f.close() # update the dictionary of station names self.sta_list.append(sta) # stack the ttimes into a numpy array self.ttimes_matrix=np.vstack(ttimes_list) (self.nsta,self.npts) = self.ttimes_matrix.shape # initialize the RtTrace(s) ########################## max_length = wo.opdict['max_length'] self.safety_margin = wo.opdict['safety_margin'] self.dt = wo.opdict['dt'] # need a RtTrace per station self.obs_rt_list=[RtTrace() for sta in self.sta_list] # register pre-processing self._register_preprocessing(wo) # need nsta streams for each point we test (nsta x npts) # for shifted waveforms self.point_rt_list=[[RtTrace(max_length=max_length) \ for ista in xrange(self.nsta)] for ip in xrange(self.npts)] # register processing of point-streams here for sta_list in self.point_rt_list: for rtt in sta_list: # This is where we would scale for distance (given pre-calculated # distances from each point to every station) rtt.registerRtProcess('scale', factor=1.0) # need npts streams to store the point-stacks 
self.stack_list=[RtTrace(max_length=max_length) for ip in xrange(self.npts)] # register stack procesing here for rtt in self.stack_list: # This is where we would add or lower weights if we wanted to rtt.registerRtProcess('scale', factor=1.0) # need 4 output streams (max, x, y, z) self.max_out = RtTrace() self.x_out = RtTrace() self.y_out = RtTrace() self.z_out = RtTrace() if not wo.is_syn: self.max_out.registerRtProcess('boxcar', width=50) # need a list of common start-times self.last_common_end_stack = [UTCDateTime(1970,1,1) for i in xrange(self.npts)] self.last_common_end_max = UTCDateTime(1970,1,1) def _register_preprocessing(self, waveloc_options): wo=waveloc_options # if this is a synthetic if wo.is_syn: # do dummy processing only for rtt in self.obs_rt_list: rtt.registerRtProcess('scale', factor=1.0) else: # get gaussian filtering parameters f0, sigma, dt = wo.gauss_filter gauss, self.filter_shift = gaussian_filter(f0, sigma, dt) # get kwin # for now just use one window kwin = wo.opdict['kwin'] # register pre-processing of data here for rtt in self.obs_rt_list: rtt.registerRtProcess('convolve', conv_signal=gauss) rtt.registerRtProcess('sw_kurtosis', win=kwin) rtt.registerRtProcess('boxcar', width=50) rtt.registerRtProcess('differentiate') rtt.registerRtProcess('neg_to_zero') def updateData(self, tr_list): """ Adds a list of traces (one per station) to the system """ t_copy=0.0 t_append=0.0 t_append_proc=0.0 t0_update=time.time() for tr in tr_list: if (self.dt!=tr.stats.delta): msg = 'Value of dt from options file %.2f does not match dt from data %2f'%(self.dt, tr.stats.delta) raise ValueError() # pre-correct for filter_shift #tr.stats.starttime -= np.round(self.filter_shift/self.dt) * self.dt tr.stats.starttime -= self.filter_shift sta=tr.stats.station ista=self.sta_list.index(sta) # make dtype of data float if it is not already tr.data=tr.data.astype(np.float32) t0=time.time() pp_data = self.obs_rt_list[ista].append(tr, gap_overlap_check = True) 
t_append_proc += time.time() - t0 # loop over points for ip in xrange(self.npts): # do time shift and append t0=time.time() pp_data_tmp = pp_data.copy() t_copy += time.time() - t0 pp_data_tmp.stats.starttime -= np.round(self.ttimes_matrix[ista,ip]/self.dt) * self.dt t0=time.time() self.point_rt_list[ip][ista].append(pp_data_tmp, gap_overlap_check = True) t_append += time.time() - t0 print "In updateData : %.2f s in process and %.2f s in data copy and %.2f s in append and a total of %.2f s" % (t_append_proc, t_copy, t_append, time.time()-t0_update) def updateStacks(self): npts=self.npts for ip in xrange(npts): self._updateStack(ip) def _updateStack(self,ip): UTCDateTime.DEFAULT_PRECISION=2 nsta=self.nsta # get common start-time for this point common_start=max([self.point_rt_list[ip][ista].stats.starttime \ for ista in xrange(nsta)]) common_start=max(common_start,self.last_common_end_stack[ip]) # get list of stations for which the end-time is compatible # with the common_start time and the safety buffer ista_ok=[ista for ista in xrange(nsta) if (self.point_rt_list[ip][ista].stats.endtime - common_start) > self.safety_margin] # get common end-time common_end=min([ self.point_rt_list[ip][ista].stats.endtime for ista in ista_ok]) self.last_common_end_stack[ip]=common_end+self.dt # stack c_list=[] for ista in ista_ok: tr=self.point_rt_list[ip][ista].copy() tr.trim(common_start, common_end) c_list.append(np.array(tr.data[:])) tr_common=np.vstack(c_list) # prepare trace for passing up stack_data = np.sum(tr_common, axis=0) stats={'station':'STACK', 'npts':len(stack_data), 'delta':self.dt, \ 'starttime':common_start} tr=Trace(data=stack_data,header=stats) #import pdb; pdb.set_trace() # append to appropriate stack_list self.stack_list[ip].append(tr, gap_overlap_check = True) def updateMax(self): npts=self.npts nsta=self.nsta # now extract maximum etc from stacks # get common start-time for this point common_start=max([self.stack_list[ip].stats.starttime \ for ip in 
xrange(npts)]) common_start=max(common_start,self.last_common_end_max) # get list of points for which the end-time is compatible # with the common_start time and the safety buffer ip_ok=[ip for ip in xrange(npts) if (self.stack_list[ip].stats.endtime - common_start) > self.safety_margin] common_end=min([self.stack_list[ip].stats.endtime for ip in ip_ok ]) self.last_common_end_max=common_end+self.dt # stack c_list=[] for ip in ip_ok: tr=self.stack_list[ip].copy() tr.trim(common_start, common_end) c_list.append(tr.data) tr_common=np.vstack(c_list) # get maximum and the corresponding point max_data = np.max(tr_common, axis=0) argmax_data = np.argmax(tr_common, axis=0) # prepare traces for passing up # max stats={'station':'Max', 'npts':len(max_data), 'delta':self.dt, \ 'starttime':common_start} tr_max=Trace(data=max_data,header=stats) self.max_out.append(tr_max, gap_overlap_check = True) # x coordinate stats['station'] = 'xMax' tr_x=Trace(data=self.x[argmax_data],header=stats) self.x_out.append(tr_x, gap_overlap_check = True) # y coordinate stats['station'] = 'yMax' tr_y=Trace(data=self.y[argmax_data],header=stats) self.y_out.append(tr_y, gap_overlap_check = True) # z coordinate stats['station'] = 'zMax' tr_z=Trace(data=self.z[argmax_data],header=stats) self.z_out.append(tr_z, gap_overlap_check = True)