def test_triggerOnset(self):
    """
    Test trigger onset function
    """
    on_of = np.array([[6.0, 31], [69, 94], [131, 181], [215, 265],
                      [278, 315], [480, 505], [543, 568], [605, 631]])
    cft = np.concatenate((np.sin(np.arange(0, 5 * np.pi, 0.1)) + 1,
                          np.sin(np.arange(0, 5 * np.pi, 0.1)) + 2.1,
                          np.sin(np.arange(0, 5 * np.pi, 0.1)) + 0.4,
                          np.sin(np.arange(0, 5 * np.pi, 0.1)) + 1))
    picks = triggerOnset(cft, 1.5, 1.0, max_len=50)
    np.testing.assert_array_equal(picks, on_of)
    # check that max_len_delete drops the picks
    picks_del = triggerOnset(cft, 1.5, 1.0, max_len=50, max_len_delete=True)
    np.testing.assert_array_equal(picks_del, on_of[np.array([0, 1, 5, 6])])
    # set True to visually inspect the test
    if False:  # pragma: no cover
        import matplotlib.pyplot as plt
        plt.plot(cft)
        plt.hlines([1.5, 1.0], 0, len(cft))
        on_of = np.array(on_of)
        plt.vlines(picks[:, 0], 1.0, 2.0, color='g', linewidth=2,
                   label="ON max_len")
        plt.vlines(picks[:, 1], 0.5, 1.5, color='r', linewidth=2,
                   label="OF max_len")
        plt.vlines(picks_del[:, 0] + 2, 1.0, 2.0, color='y', linewidth=2,
                   label="ON max_len_delete")
        plt.vlines(picks_del[:, 1] + 2, 0.5, 1.5, color='b', linewidth=2,
                   label="OF max_len_delete")
        plt.legend()
        plt.show()
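# -----------------------------------------------------------------------------
# A minimal usage sketch, not taken from the excerpts in this document: compute
# a recursive STA/LTA characteristic function for a single Trace and pick
# trigger on/off pairs with triggerOnset. The thresholds (3.5 / 0.5) and the
# window lengths (0.5 s / 10 s) are illustrative assumptions only; imports
# assume the legacy camelCase obspy.signal API used in these excerpts.
# -----------------------------------------------------------------------------
from obspy.core import read
from obspy.signal.trigger import recStalta, triggerOnset

st = read()  # ObsPy example data
tr = st[0]
df = tr.stats.sampling_rate
cft = recStalta(tr.data.astype('float64'), int(0.5 * df), int(10.0 * df))
on_off = triggerOnset(cft, 3.5, 0.5)  # array of [on, off] sample indices
for on, off in on_off:
    print tr.stats.starttime + on / df, tr.stats.starttime + off / df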
def trig(self, slta):
    """
    Run triggerOnset on every trace. ``slta`` is a space separated string;
    its third and fourth tokens are read as the ON and OFF thresholds (the
    first two, presumably STA/LTA lengths, are not used in this excerpt).
    """
    tas = slta.split(' ')
    ON = float(tas[2])
    OFF = float(tas[3])
    for i in range(len(self)):
        pic = triggerOnset(self[i].data, ON, OFF)
        # store the picks as one concatenated string of [on, off] pairs
        ooo = ''.join(str(p) for p in pic)
        self[i].stats['trigger_on'] = ON
        self[i].stats['trigger_off'] = OFF
        self[i].stats['picks'] = ooo
    return self
print "Cannot process station %s, no RESP file given" % tr.stats.station continue # Cannot process a whole day file, split it in smaller junks overlap = s2p(30.0, tr) olap = overlap samp = 0 df = tr.stats.sampling_rate if trId(tr.stats)[1] != last_id or tr.stats.starttime - last_endtime > 1.0 / df: data_buf = np.array([], dtype='float64') olap = 0 while samp < tr.stats.npts: data = tr.data[samp:samp + nfft - olap].astype('float64') data = np.concatenate((data_buf, data)) data = detrend(data) # Correct for frequency response of instrument data = seisSim(data, tr.stats.sampling_rate, paz, inst_sim=inst) data /= (paz['sensitivity'] / 1e9) #V/nm/s correct for overall sensitivity data = recStalta(data, s2p(2.5, tr), s2p(10.0, tr)) picked_values = triggerOnset(data, 3.0, 0.5, max_len=overlap) # for i, j in picked_values: begin = tr.stats.starttime + float(i + samp - olap) / df end = tr.stats.starttime + float(j + samp - olap) / df f.write("%s,%s,%s\n" % (str(begin), str(end), tr.stats.station)) olap = overlap # only needed for first time in loop samp += nfft - overlap data_buf = data[-overlap:] print '.', # Progress Bar last_endtime, last_id = trId(tr.stats) f.close()
    olap = 0
    while samp < tr.stats.npts:
        data = tr.data[samp:samp + nfft - olap].astype('float64')
        data = np.concatenate((data_buf, data))
        data = detrend(data)
        # Correct for frequency response of instrument
        data = seisSim(data, df, paz_remove=tr.stats.paz, paz_simulate=inst,
                       remove_sensitivity=True)
        # XXX is removed in seisSim... ?!
        # XXX data /= (paz['sensitivity'] / 1e9)  # V/nm/s correct for overall sensitivity
        data = bandpass(data, LOW, HIGH, df)
        data = recStalta(data, s2p(STA, tr), s2p(LTA, tr))
        picked_values = triggerOnset(data, ON, OFF, max_len=overlap)
        for i, j in picked_values:
            begin = tr.stats.starttime + float(i + samp - olap) / df
            end = tr.stats.starttime + float(j + samp - olap) / df
            trigger_list.append((begin.timestamp, end.timestamp,
                                 tr.stats.station))
        olap = overlap  # only needed for first time in loop
        samp += nfft - overlap
        data_buf = data[-overlap:]
    last_endtime, last_id = trId(tr.stats)

###############################################################################
# start of coincidence part
###############################################################################
trigger_list.sort()
    tr.data = detrend(tr.data)
st.simulate(paz_remove="self", paz_simulate=cornFreq2Paz(1.0),
            remove_sensitivity=False)
st.sort()
st_trigger = st.copy()
st_trigger.filter("bandpass", freqmin=PAR.LOW, freqmax=PAR.HIGH, corners=1,
                  zerophase=True)
st.trim(T1, T2)
st_trigger.trim(T1, T2)
st_trigger.trigger("recstalta", sta=PAR.STA, lta=PAR.LTA)
summary.append(str(st))

# do the triggering
trigger_list = []
for tr in st_trigger:
    tr.stats.channel = "recstalta"
    max_len = PAR.MAXLEN * tr.stats.sampling_rate
    trigger_sample_list = triggerOnset(tr.data, PAR.ON, PAR.OFF,
                                       max_len=max_len)
    for on, off in trigger_sample_list:
        begin = tr.stats.starttime + float(on) / tr.stats.sampling_rate
        end = tr.stats.starttime + float(off) / tr.stats.sampling_rate
        trigger_list.append((begin.timestamp, end.timestamp,
                             tr.stats.station))
trigger_list.sort()

# merge waveform and trigger stream for plotting
# the normalizations are done because the triggers have a completely different
# scale and would not be visible in the plot otherwise...
st.filter("bandpass", freqmin=1.0, freqmax=20.0, corners=1, zerophase=True)
st.normalize(global_max=False)
st_trigger.normalize(global_max=True)
st.extend(st_trigger)

# coincidence part, work through sorted trigger list...
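# -----------------------------------------------------------------------------
# Hypothetical sketch of such a coincidence stage over the sorted trigger_list
# of (begin, end, station) tuples built above; the actual implementation is
# not part of these excerpts. MIN_STATIONS and MAX_SPREAD are assumed
# parameters, not values from the original scripts.
# -----------------------------------------------------------------------------
from obspy.core import UTCDateTime

MIN_STATIONS = 3   # minimum number of different stations triggering together
MAX_SPREAD = 10.0  # maximum spread of trigger begin times in seconds

events = []
coinc = []  # triggers currently overlapping in time
for begin, end, station in trigger_list:
    # drop triggers whose begin time is too far from the current one
    coinc = [t for t in coinc if begin - t[0] <= MAX_SPREAD]
    coinc.append((begin, end, station))
    stations = set(t[2] for t in coinc)
    if len(stations) >= MIN_STATIONS:
        events.append((coinc[0][0], max(t[1] for t in coinc),
                       sorted(stations)))
        coinc = []

for begin, end, stations in events:
    print UTCDateTime(begin), UTCDateTime(end), ", ".join(stations)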