Example #1
def update_cft(prev_val, selected=None):
    print('{} ({})'.format(
        ticker_alg.value,
        STALTA_ALGORITHMS[ticker_alg.value]['name']))  # print algorithm used
    if STALTA_ALGORITHMS[ticker_alg.value]['implemented']:

        print('')
        print(st)
        print('')

        from stalta_tuner.trigger import coincidence_trigger
        cft, triggers = coincidence_trigger(
            STALTA_ALGORITHMS[ticker_alg.value]['name'],  # map the human-readable algorithm name to the obspy algorithm type
            trigger_slider.value[1],  # trigger-on threshold for the CFT
            trigger_slider.value[0],  # trigger-off threshold for the CFT
            st,  # stream holding the data
            settings['ntriggersta'],  # thr_coincidence_sum: number of stations required for a detection
            sta=stalta_slider.value[0],  # STA window (s)
            lta=stalta_slider.value[1],  # LTA window (s)
        )
        print('{} Stations required: {} triggers'.format(
            settings['ntriggersta'], len(triggers)))  # print results
        print('')

        for i in range(len(cft_plots)):
            sourcelist_cft[i].data = dict(
                times=num2date(cft[i].times('matplotlib')),
                cft=cft[i].data)
            trig_on_thresh[i].location = trigger_slider.value[1]
            trig_off_thresh[i].location = trigger_slider.value[0]
        #cft_plots = plotting.cft_multiplot(sourcelist_cft, cft_thresh=[stalta_slider.value[0], stalta_slider.value[1]] )

        triggert = utils.trigtimes(triggers)
        source_triggers.data = dict(ontimes=triggert,
                                    y=np.zeros(triggert.shape))

        # Update the limits for the minimum and maximum trigger on/off thresholds
        cft_min = []
        cft_max = []
        for c in cft:
            c = c.data[100:]  # drop the first 100 samples; the CFT is unstable until the windows fill
            # round down to one decimal, then pad by 0.1 on either side
            cft_min.append(np.floor(min(c) * 10) / 10 - 0.1)
            cft_max.append(np.floor(max(c) * 10) / 10 + 0.1)
        print('new CFT min/max: {},{}'.format(min(cft_min), max(cft_max)))

    else:
        print(ticker_alg.value + ' is not yet implemented.')
        ticker_alg.value = prev_val
Example #2
def network_detection(st, cft_return=True):
    # TODO: Dynamic threshold method of Akram 2013

    fs = st[0].stats.sampling_rate
    sta_len_sec = 2.5 * DOM_PERIOD  # 2-3 times dominant period
    lta_len_sec = 7.5 * sta_len_sec  # 5-10 times STA
    nsta = int(sta_len_sec * fs)
    nlta = int(lta_len_sec * fs)
    on_thresh = 3.0  # 3.5
    off_thresh = 0.5
    numsta = len(list(set([tr.stats.station for tr in st])))
    min_chans = numsta * 2  # Minimum number of channels to log network detection

    cft_stream = Stream()
    if cft_return:
        for i, tr in enumerate(st.traces):
            cft = recursive_sta_lta(tr.data, nsta=nsta, nlta=nlta)
            # cft = eps_smooth(cft, w=int(EPS_WINLEN * Fs))
            cft_stream += Trace(data=cft, header=tr.stats)
        detection_list = coincidence_trigger(None,
                                             on_thresh,
                                             off_thresh,
                                             cft_stream,
                                             thr_coincidence_sum=min_chans,
                                             max_trigger_length=2.0,
                                             delete_long_trigger=True,
                                             details=True)

    else:
        detection_list = coincidence_trigger('recstalta',
                                             on_thresh,
                                             off_thresh,
                                             st,
                                             sta=sta_len_sec,
                                             lta=lta_len_sec,
                                             thr_coincidence_sum=min_chans,
                                             max_trigger_length=2.0,
                                             delete_long_trigger=True,
                                             details=True)
    # Dictionary keys:
    # time, stations, trace_ids, coincidence_sum, cft_peaks, cft_stds, duration, cft_wmean, cft_std_wmean
    return detection_list, cft_stream
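
Each detection in the returned list is a plain dict carrying the keys noted above. A minimal usage sketch (hedged: it uses ObsPy's bundled example stream, and assumes DOM_PERIOD plus the trigger imports are defined as in the original module):

from obspy import read

DOM_PERIOD = 0.1  # assumed dominant period in seconds; defined elsewhere in the original module
st = read()  # ObsPy's bundled example stream: one station, three channels
detections, cfts = network_detection(st)
for det in detections:
    print(det['time'], det['coincidence_sum'], det['duration'])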
Example #3
def network_detect(stream,
                   sta,
                   lta,
                   on_thresh,
                   off_thresh=None,
                   min_chans=8,
                   min_sep=3.,
                   max_trigger_length=3.0):
    if off_thresh is None:
        off_thresh = 0.5 * on_thresh
    print("STA = %f, LTA = %f, on_thresh = %f, off_thresh = %f" %
          (sta, lta, on_thresh, off_thresh))

    # Run detector
    detection_list = coincidence_trigger('recstalta',
                                         on_thresh,
                                         off_thresh,
                                         stream,
                                         sta=sta,
                                         lta=lta,
                                         thr_coincidence_sum=min_chans,
                                         max_trigger_length=max_trigger_length)

    print("%d detections" % len(detection_list))
    numsta = len({tr.stats.station for tr in stream})  # number of unique stations

    # Write file
    fname = "detections_sta%3.2f_lta%3.2f_on%2.1f_off%2.1f.list" % (
        sta, lta, on_thresh, off_thresh)
    with open(fname, "w") as outfile:
        outfile.write(
            "STA = %f, LTA = %f, on_thresh = %f, off_thresh = %f, min_chans = %d, min_sep = %f, max_trigger_length = %f\n"
            % (sta, lta, on_thresh, off_thresh, min_chans, min_sep,
               max_trigger_length))
        for detection in detection_list:
            outfile.write("%s\n" %
                          detection["time"].strftime("%Y-%m-%d %H:%M:%S"))
Example #4
def update_cft(prev_val, selected=None):
    print(ticker_alg.value)
    print(STALTA_ALGORITHMS[ticker_alg.value]['name'])
    if STALTA_ALGORITHMS[ticker_alg.value]['implemented']:
        from trigger import coincidence_trigger

        #print(st)
        #print(trigger_slider.value)
        #print(stalta_slider.value)

        cft, triggers = coincidence_trigger(
            STALTA_ALGORITHMS[ticker_alg.value]['name'],  # map the human-readable algorithm name to the obspy algorithm type
            trigger_slider.value[1],  # trigger-on threshold for the CFT
            trigger_slider.value[0],  # trigger-off threshold for the CFT
            st,  # stream object
            ntriggersta,  # thr_coincidence_sum: number of stations required for a detection
            sta=stalta_slider.value[0],  # STA window (s)
            lta=stalta_slider.value[1],  # LTA window (s)
        )
        print('# Required Stations: {}'.format(ntriggersta))
        print('Number of Triggers: {}'.format(len(triggers)))
        print('')
        print(max(cft[0].data))
        print('')
        #print(triggers)
        #print('')

        triggert = utils.trigtimes(triggers)
        source_triggers.data = dict(ontimes=triggert,
                                    y=np.zeros(triggert.shape))
        for c in cft:
            # note: each pass overwrites the source, so only the last trace's CFT is displayed
            source_stalta.data = dict(times=c.times(), cft=c.data)

    else:
        print(ticker_alg.value + ' is not yet implemented.')
        ticker_alg.value = prev_val
Example #5
def vibbox_trigger(st, freqmin=1000, freqmax=15000, sta=0.01, lta=0.05, on=1.3, off=1, num=10):
    starttime = st[0].stats.starttime
    st = st[0:63] # throw out time signal
    cassm = st[61].copy().differentiate()
    st.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
    trig = coincidence_trigger("recstalta", on,  off, st, num, sta=sta, lta=lta)
    st.trigger('recstalta', sta=sta, lta=lta)
    ids = [tr.id for tr in st]  # must be a real list (not a lazy map) to concatenate below
    columns = ['time', 'duration'] + ids
    df_triggers = pd.DataFrame(columns=columns)
    for ii in range(len(trig)):
        if 'SV.CTrig..' in trig[ii]['trace_ids']: # throw out CASSM shots
            continue
        if trig[ii]['duration'] > 0.1: # spurious triggers at beginning of file, should be done differently
            continue
        trig_sample = int((obspy.UTCDateTime(trig[ii]['time']) - starttime) * st[0].stats.sampling_rate)
        # check if CASSM trigger fired
        if np.max(cassm.data[(trig_sample - 1000):(1000 + trig_sample + int(trig[ii]['duration'] * st[0].stats.sampling_rate))]) > 10000:
            continue
        current_trigger = {'time': trig[ii]['time'], 'duration': trig[ii]['duration']}
        for jj in range(60):
            current_trigger[st[jj].id] = np.max(st[jj].data[trig_sample:trig_sample + int(trig[ii]['duration'] * st[0].stats.sampling_rate)])
        # DataFrame.append() was removed from pandas; build the row and concat instead
        df_triggers = pd.concat([df_triggers, pd.DataFrame([current_trigger])], ignore_index=True)
    return df_triggers
Example #6
    def test_correlate_stream_template_and_correlation_detector(self):
        template = read().filter('highpass', freq=5).normalize()
        pick = UTCDateTime('2009-08-24T00:20:07.73')
        template.trim(pick, pick + 10)
        n1 = len(template[0])
        n2 = 100 * 3600  # 1 hour of samples at 100 Hz
        dt = template[0].stats.delta
        # shift one template Trace
        template[1].stats.starttime += 5
        stream = template.copy()
        np.random.seed(42)
        for tr, trt in zip(stream, template):
            tr.stats.starttime += 24 * 3600
            tr.data = np.random.random(n2) - 0.5  # noise
            if tr.stats.channel[-1] == 'Z':
                tr.data[n1:2 * n1] += 10 * trt.data
                tr.data = tr.data[:-n1]
            tr.data[5 * n1:6 * n1] += 100 * trt.data
            tr.data[20 * n1:21 * n1] += 2 * trt.data
        # make one template trace a bit shorter
        template[2].data = template[2].data[:-n1 // 5]
        # make two stream traces a bit shorter
        stream[0].trim(5, None)
        stream[1].trim(1, 20)
        # second template
        pick2 = stream[0].stats.starttime + 20 * n1 * dt
        template2 = stream.slice(pick2 - 5, pick2 + 5)
        # test cross correlation
        stream_orig = stream.copy()
        template_orig = template.copy()
        ccs = correlate_stream_template(stream, template)
        self.assertEqual(len(ccs), len(stream))
        self.assertEqual(stream[1].stats.starttime, ccs[0].stats.starttime)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test if traces with not matching seed ids are discarded
        ccs = correlate_stream_template(stream[:2], template[1:])
        self.assertEqual(len(ccs), 1)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test template_time parameter
        ccs1 = correlate_stream_template(stream, template)
        template_time = template[0].stats.starttime + 100
        ccs2 = correlate_stream_template(stream,
                                         template,
                                         template_time=template_time)
        self.assertEqual(len(ccs2), len(ccs1))
        delta = ccs2[0].stats.starttime - ccs1[0].stats.starttime
        self.assertAlmostEqual(delta, 100)
        # test if all three events found
        detections, sims = correlation_detector(stream, template, 0.2, 30)
        self.assertEqual(len(detections), 3)
        dtime = pick + n1 * dt + 24 * 3600
        self.assertAlmostEqual(detections[0]['time'], dtime)
        self.assertEqual(len(sims), 1)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test if xcorr stream is suitable for coincidence_trigger
        # result should be the same, return values related
        ccs = correlate_stream_template(stream, template)
        triggers = coincidence_trigger(None,
                                       0.2,
                                       -1,
                                       ccs,
                                       2,
                                       max_trigger_length=30,
                                       details=True)
        self.assertEqual(len(triggers), 2)
        for d, t in zip(detections[1:], triggers):
            self.assertAlmostEqual(np.mean(t['cft_peaks']), d['similarity'])
        # test template_magnitudes
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_magnitudes=1)
        self.assertAlmostEqual(detections[1]['amplitude_ratio'], 100, delta=1)
        self.assertAlmostEqual(detections[1]['magnitude'],
                               1 + 8 / 3,
                               delta=0.01)
        self.assertAlmostEqual(detections[2]['amplitude_ratio'], 2, delta=2)
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_magnitudes=True)
        self.assertAlmostEqual(detections[1]['amplitude_ratio'], 100, delta=1)
        self.assertNotIn('magnitude', detections[1])
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test template names
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_names='eq')
        self.assertEqual(detections[0]['template_name'], 'eq')
        detections, _ = correlation_detector(stream,
                                             template,
                                             0.2,
                                             30,
                                             template_names=['eq'],
                                             plot=True)
        self.assertEqual(detections[0]['template_name'], 'eq')

        # test similarity parameter with additional constraints
        # test details=True

        def simf(ccs):
            ccmatrix = np.array([tr.data for tr in ccs])
            comp_thres = np.sum(ccmatrix > 0.2, axis=0) > 1
            similarity = ccs[0].copy()
            similarity.data = np.mean(ccmatrix, axis=0) * comp_thres
            return similarity

        detections, _ = correlation_detector(stream,
                                             template,
                                             0.1,
                                             30,
                                             similarity_func=simf,
                                             details=True)
        self.assertEqual(len(detections), 2)
        for d in detections:
            self.assertAlmostEqual(np.mean(list(d['cc_values'].values())),
                                   d['similarity'])
        # test if properties from find_peaks function are returned
        detections, sims = correlation_detector(stream,
                                                template,
                                                0.1,
                                                30,
                                                threshold=0.16,
                                                details=True,
                                                similarity_func=simf)
        try:
            from scipy.signal import find_peaks  # noqa
        except ImportError:
            self.assertEqual(len(detections), 2)
            self.assertNotIn('left_threshold', detections[0])
        else:
            self.assertEqual(len(detections), 1)
            self.assertIn('left_threshold', detections[0])
        # also check the _find_peaks function
        distance = int(round(30 / sims[0].stats.delta))
        indices = _find_peaks(sims[0].data, 0.1, distance, distance)
        self.assertEqual(len(indices), 2)
        # test distance parameter
        detections, _ = correlation_detector(stream, template, 0.2, 500)
        self.assertEqual(len(detections), 1)
        # test more than one template
        # just 2 detections for first template, because second template has
        # a higher similarity for third detection
        templates = (template, template2)
        templatetime2 = pick2 - 10
        template_times = (template[0].stats.starttime, templatetime2)
        detections, _ = correlation_detector(stream,
                                             templates, (0.2, 0.3),
                                             30,
                                             plot=stream,
                                             template_times=template_times,
                                             template_magnitudes=(2, 5))
        self.assertGreater(len(detections), 0)
        self.assertIn('template_id', detections[0])
        detections0 = [d for d in detections if d['template_id'] == 0]
        self.assertEqual(len(detections0), 2)
        self.assertEqual(len(detections), 3)
        self.assertAlmostEqual(detections[2]['similarity'], 1)
        self.assertAlmostEqual(detections[2]['magnitude'], 5)
        self.assertEqual(detections[2]['time'], templatetime2)
        # test if everything is correct if template2 and stream do not have
        # any ids in common
        templates = (template, template2[2:])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            detections, sims = correlation_detector(
                stream[:1],
                templates,
                0.2,
                30,
                plot=True,
                template_times=templatetime2,
                template_magnitudes=2)
        detections0 = [d for d in detections if d['template_id'] == 0]
        self.assertEqual(len(detections0), 3)
        self.assertEqual(len(detections), 3)
        self.assertEqual(len(sims), 2)
        self.assertIsInstance(sims[0], Trace)
        self.assertIs(sims[1], None)
Example #7
 def test_coincidenceTriggerWithSimilarityChecking(self):
     """
     Test network coincidence trigger with cross correlation similarity
     checking of given event templates.
     """
     st = Stream()
     files = ["BW.UH1._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH2._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH3._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH3._.SHN.D.2010.147.cut.slist.gz",
              "BW.UH3._.SHE.D.2010.147.cut.slist.gz",
              "BW.UH4._.EHZ.D.2010.147.cut.slist.gz"]
     for filename in files:
         filename = os.path.join(self.path, filename)
         st += read(filename)
     # some prefiltering used for UH network
     st.filter('bandpass', freqmin=10, freqmax=20)
     # set up template event streams
     times = ["2010-05-27T16:24:33.095000", "2010-05-27T16:27:30.370000"]
     templ = {}
     for t in times:
         t = UTCDateTime(t)
         st_ = st.select(station="UH3").slice(t, t + 2.5).copy()
         templ.setdefault("UH3", []).append(st_)
     times = ["2010-05-27T16:27:30.574999"]
     for t in times:
         t = UTCDateTime(t)
         st_ = st.select(station="UH1").slice(t, t + 2.5).copy()
         templ.setdefault("UH1", []).append(st_)
     trace_ids = {"BW.UH1..SHZ": 1,
                  "BW.UH2..SHZ": 1,
                  "BW.UH3..SHZ": 1,
                  "BW.UH4..EHZ": 1}
     similarity_thresholds = {"UH1": 0.8, "UH3": 0.7}
     with warnings.catch_warnings(record=True) as w:
         # avoid getting influenced by the warning filters getting set up
         # differently in obspy-runtests.
         # (e.g. depending on options "-v" and "-q")
         warnings.resetwarnings()
         trig = coincidence_trigger(
             "classicstalta", 5, 1, st.copy(), 4, sta=0.5, lta=10,
             trace_ids=trace_ids, event_templates=templ,
             similarity_threshold=similarity_thresholds)
         # two warnings get raised
         self.assertEqual(len(w), 2)
     # check floats in resulting dictionary separately
     self.assertAlmostEqual(trig[0].pop('duration'), 3.9600000381469727)
     self.assertAlmostEqual(trig[1].pop('duration'), 1.9900000095367432)
     self.assertAlmostEqual(trig[2].pop('duration'), 1.9200000762939453)
     self.assertAlmostEqual(trig[3].pop('duration'), 3.9200000762939453)
     self.assertAlmostEqual(trig[0]['similarity'].pop('UH1'), 0.94149447384)
     self.assertAlmostEqual(trig[0]['similarity'].pop('UH3'), 1)
     self.assertAlmostEqual(trig[1]['similarity'].pop('UH1'), 0.65228204570)
     self.assertAlmostEqual(trig[1]['similarity'].pop('UH3'), 0.72679293429)
     self.assertAlmostEqual(trig[2]['similarity'].pop('UH1'), 0.89404458774)
     self.assertAlmostEqual(trig[2]['similarity'].pop('UH3'), 0.74581409371)
     self.assertAlmostEqual(trig[3]['similarity'].pop('UH1'), 1)
     self.assertAlmostEqual(trig[3]['similarity'].pop('UH3'), 1)
     remaining_results = \
         [{'coincidence_sum': 4.0,
           'similarity': {},
           'stations': ['UH3', 'UH2', 'UH1', 'UH4'],
           'time': UTCDateTime(2010, 5, 27, 16, 24, 33, 210000),
           'trace_ids': ['BW.UH3..SHZ', 'BW.UH2..SHZ', 'BW.UH1..SHZ',
                         'BW.UH4..EHZ']},
          {'coincidence_sum': 3.0,
           'similarity': {},
           'stations': ['UH3', 'UH1', 'UH2'],
           'time': UTCDateTime(2010, 5, 27, 16, 25, 26, 710000),
           'trace_ids': ['BW.UH3..SHZ', 'BW.UH1..SHZ', 'BW.UH2..SHZ']},
          {'coincidence_sum': 3.0,
           'similarity': {},
           'stations': ['UH2', 'UH1', 'UH3'],
           'time': UTCDateTime(2010, 5, 27, 16, 27, 2, 260000),
           'trace_ids': ['BW.UH2..SHZ', 'BW.UH1..SHZ', 'BW.UH3..SHZ']},
          {'coincidence_sum': 4.0,
           'similarity': {},
           'stations': ['UH3', 'UH2', 'UH1', 'UH4'],
           'time': UTCDateTime(2010, 5, 27, 16, 27, 30, 510000),
           'trace_ids': ['BW.UH3..SHZ', 'BW.UH2..SHZ', 'BW.UH1..SHZ',
                         'BW.UH4..EHZ']}]
     self.assertEqual(trig, remaining_results)
Example #8
# Read data
data = "../data/%s/%s.*.*.SON.*.*.BH*.R.SAC" %(tanggal.strftime('%Y%j'),tanggal.strftime('%Y.%j.%H'))
st = read(data,format='SAC') 

# Select Z component
st_cp = st.copy()
st_Z = st_cp.select(component="Z")

# Bandpass filter
df = st_Z[0].stats.sampling_rate
st_Z.taper(max_percentage=0.05, type='hann', max_length=None, side='both')
st_Z.filter("bandpass", freqmin=1, freqmax=20, corners=4, zerophase=False)

# Earthquake detection
triggers = coincidence_trigger("recstalta", 10, 2, st_Z, 3, sta=0.5, lta=10, details=True)
# Trigger type = recursive STA/LTA
# Trigger-on threshold = 10, trigger-off threshold = 2 (CFT values, not seconds)
# Stream = st_Z
# Threshold for coincidence sum = 3 stations
# STA = 0.5 s, LTA = 10 s
print(len(triggers), "events triggered.")

# Output phase file
time = st[0].stats.starttime
phase_file = '../pick/%s.pick' % (time.strftime('%Y%m%d_%H%M%S'))
f_phase = open(phase_file, 'w')

# Pick P and S arrivals with an AR-AIC + STA/LTA algorithm
for trigger in triggers:
    st_trig = Stream()
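
The picking loop is truncated in this example. Below is a hedged sketch of the AR-AIC step the comment describes, using ObsPy's ar_pick; the window length and the filter/AR parameters are illustrative assumptions, not the original values:

from obspy.signal.trigger import ar_pick

for trigger in triggers:
    t0 = trigger['time']
    st_trig = st.slice(t0 - 5, t0 + 30)  # window around the network trigger (length assumed)
    df_trig = st_trig[0].stats.sampling_rate
    z = st_trig.select(component="Z")[0].data
    n = st_trig.select(component="N")[0].data
    e = st_trig.select(component="E")[0].data
    # args: Z/N/E data, sampling rate, filter band f1-f2, P LTA/STA, S LTA/STA,
    # AR model orders for P and S, variance window lengths for P and S
    p_pick, s_pick = ar_pick(z, n, e, df_trig, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
    f_phase.write("%s P %s S %s\n" % (st_trig[0].stats.station,
                                      t0 - 5 + p_pick, t0 - 5 + s_pick))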
Example #9
def trigger(st, stC, rtable, opt):

    """
    Run triggering algorithm on a stream of data.

    st: OBSPy stream of data
    rtable: Repeater table contains reference time of previous trigger in samples
    opt: Options object describing station/run parameters

    Returns triggered traces as OBSPy trace object updates ptime for next run 
    """
    
    tr = st[0]
    t = tr.stats.starttime

    cft = coincidence_trigger("classicstalta", opt.trigon, opt.trigoff, stC, opt.nstaC,
        sta=opt.swin, lta=opt.lwin, details=True)
    if len(cft) > 0:
        
        ind = 0
        
        # Slice out the data from st and save the maximum STA/LTA ratio value for
        # use in orphan expiration
        
        # Convert ptime from time of last trigger to seconds before start time
        if rtable.attrs.ptime:
            ptime = (UTCDateTime(rtable.attrs.ptime) - t)
        else:
            ptime = -opt.mintrig
        
        for n in range(len(cft)):
                    
            ttime = cft[n]['time'] # This is a UTCDateTime, not samples
            
            if (ttime >= t + opt.atrig) and (ttime >= t + ptime +
                opt.mintrig) and (ttime < t + len(tr.data)/opt.samprate -
                2*opt.atrig):
                
                ptime = ttime - t
                
                # Slice and save as first trace              
                ttmp = st.slice(ttime - opt.ptrig, ttime + opt.atrig)
                ttmp[0].data = ttmp[0].data[0:opt.wshape] - np.mean(
                    ttmp[0].data[0:opt.wshape])
                for s in range(1,len(ttmp)):
                    ttmp[0].data = np.append(ttmp[0].data, ttmp[s].data[
                        0:opt.wshape] - np.mean(ttmp[s].data[0:opt.wshape]))
                ttmp[0].stats.maxratio = np.max(cft[n]['cft_peaks'])
                if ind == 0:
                    trigs = Stream(ttmp[0])
                    ind = ind+1
                else:
                    trigs = trigs.append(ttmp[0])
                                                         
        if ind == 0:
            return []
        else:
            rtable.attrs.ptime = (t + ptime).isoformat()
            return trigs
    else:
        return []
Example #11
def pickEvents(path1, path2, datatype, w_s, thres, coSumThres, dec, detrend,
               filt, **filt_kwargs):
    '''
    Loads in data, runs stalta function, and returns list of events using coincidence trigger
    INPUTS:
        path1: path to data for station 1 (stored in mseed format); type: str
        path2: path to data for station 2 (stored in mseed format); type: str
        datatype: 'MSEED' or 'SEGY'; type: str

        w_s: [STA, LTA] window sizes in seconds; type: 2 entry array
        thres: [lower, upper] threshold where trigger is deactivated or activated; type: 2 entry array
        coSumThres: minimum coincidence sum required to log an event; type: float
        dec: factor to downsample the data by; type: int
        detrend: 'simple', 'linear', 'constant', 'polynomial', 'spline'; type: str
        filt: type of filter; 'bandpass', 'lowpass', 'highpass', etc.; type: str
        filt_kwargs: options depending on filter chosen
            if filt='bandpass': freqmin=lower cutoff frequency, freqmax=upper cutoff
            if filt='lowpass': freq=cutoff frequency
            if filt='highpass': freq=cutoff frequency
    RETURNS:
        trig: dictionary keyed by day, each entry holding the list of verified events for that day
    '''

    if datatype == 'MSEED':
        dic1 = dayDic_MSEED(path1)
        dic2 = dayDic_MSEED(path2)
    elif datatype == 'SEGY':
        dic1 = dayDic_SEGY(path1)
        dic2 = dayDic_SEGY(path2)
    else:
        raise NameError('Not a valid file extension!')

    trig = {}

    # find predicted time for event to travel between stations
    tpred = latlong.main()

    ID = 0

    # loop through each day (each key in dictionary 1)
    for day in dic1:
        files1 = dic1.get(day)
        files2 = dic2.get(day)

        # if data doesn't exist for one of the stations, ignore the day
        if (files1 is None) or (files2 is None):
            continue

        # make one empty stream per station (separate streams keep the two
        # stations' data apart; a single shared stream would feed duplicate
        # traces into the coincidence trigger below)
        data1 = Stream()
        data2 = Stream()

        for path, files, data in ((path1, files1, data1),
                                  (path2, files2, data2)):

            for file in files:

                try:
                    # load in the data for one file (one hour)
                    st = obs.core.read(join(path, file),
                                       datatype,
                                       byteorder='<')
                except Exception:
                    # to catch weird segy files
                    print('Something is wrong with this file: ' + str(file))
                    continue

                # process (filter, decimate, detrend) the data
                dataProc = procData(st, dec, detrend, filt, **filt_kwargs)

                # store processed data
                data += dataProc

        # use coincidence triggering on processed data from both stations
        trigDay = tg.coincidence_trigger('recstalta',
                                         thres[1],
                                         thres[0],
                                         data1 + data2,
                                         coSumThres,
                                         sta=w_s[0],
                                         lta=w_s[1])

        # empty list for REAL events
        realTrigDay = []

        # property checking
        for event in trigDay:
            stations = event.get('stations')
            if stations[0] != 'MMTN':
                # 'event' occurred at WCYN first; not a real event
                continue

            time = event.get('time')
            if time[0].hour < 8 or time[0].hour > 18:
                # 'event' occurred before 8am or after 6pm; not a real event
                continue

            timeDiff = time[1] - time[0]  # time difference in seconds
            if timeDiff > (tpred + 5) or timeDiff < (tpred - 3):
                # 'event' did not reach the second station within predicted time
                #TODO: arbitrarily chose a window of 5/3 s around the predicted time
                continue

            # otherwise, this is a real event!
            realTrigDay.append(event)

            # make a 1 min window around event
            dt = datetime.timedelta(seconds=0, minutes=1)

            # save data in window around event
            event['data1'] = dataToSave(data1, time[0] - dt, time[0] + dt)
            event['data2'] = dataToSave(data2, time[1] - dt, time[1] + dt)

            # save event ID
            event['ID'] = ID
            ID += 1

        # append events from one day to dictionary
        trig[day] = realTrigDay
        print('Events today: ', len(realTrigDay))

    return trig
Example #12
st = read("./data/mtcarmel.mseed")
st.filter("bandpass", freqmin=2, freqmax=10)
st.trigger(type="classicstalta", sta=1, lta=30)
st.plot()

# We could now manually compare trigger values on the different stations to find small aftershocks, termed a network coincidence trigger. However, there is a convenience function in ObsPy's signal toolbox to do just that in only a few lines of code.
#
# Read the data again and apply a bandpass to the dominant frequencies of the events. Use the `coincidence_trigger` function that returns a list of possible events (see the [ObsPy Tutorial](http://docs.obspy.org/tutorial/code_snippets/trigger_tutorial.html#network-coincidence-trigger-example) for an example of a recursive STA/LTA network coincidence trigger). Print the length of the list and adjust the trigger-on/off thresholds so that you get around 5 suspected events.
#
# Print the first trigger in the list to show information on the suspected event.

from obspy.signal.trigger import coincidence_trigger

st = read("./data/mtcarmel.mseed")
st.filter("bandpass", freqmin=2, freqmax=10)
triggers = coincidence_trigger("recstalta", 10, 2, st, 3, sta=1, lta=20)
print(len(triggers))
print(triggers[0])

# Go over the list of triggers in a for-loop. For each trigger/suspected event:
#
#  - print the time of the trigger
#  - read the waveform data, use [`starttime` and `endtime` arguments for `read()`](http://docs.obspy.org/packages/autogen/obspy.core.stream.read.html) to trim the data to the suspected event right during reading (avoiding to read the whole file again and again)
#  - calculate and print the network magnitude using the `netmag(st)` function from earlier
#  - make a preview plot
#
# If you're curious you can compare the crude magnitude estimates with the [table of aftershocks](http://www.seismosoc.org/publications/srl/SRL_82/srl_82-5_hamburger_et_al-esupp/Table_S2.txt) provided by the scientists that analyzed the aftershock sequence. The paper with details can be found here: ["Aftershocks of the 2008 Mt. Carmel, Illinois, Earthquake: Evidence for Conjugate Faulting near the Termination of the Wabash Valley Fault System" by M. W. Hamburger, K. Shoemaker, S. Horton, H. DeShon, M. Withers, G. L. Pavlis and E. Sherrill, SRL 2011](http://srl.geoscienceworld.org/content/82/5/735.short).

for trig in triggers:
    t = trig['time']
    st = read("./data/mtcarmel.mseed", starttime=t-3, endtime=t+15)
Example #13
event_templates = {"UH3": []}
for t in times:
    t = UTCDateTime(t)
    st_ = st.select(station="UH3").slice(t, t + 2.5)
    event_templates["UH3"].append(st_)

t = UTCDateTime("2010-05-27T16:27:30.574999")
st_ = st.select(station="UH1").slice(t, t + 2.5)
event_templates["UH1"] = [st_]

st2 = st.copy()
trace_ids = {
    "BW.UH1..SHZ": 1,
    "BW.UH2..SHZ": 1,
    "BW.UH3..SHZ": 1,
    "BW.UH4..SHZ": 1
}
similarity_thresholds = {"UH1": 0.8, "UH3": 0.7}
trig = coincidence_trigger("classicstalta",
                           5,
                           1,
                           st2,
                           4,
                           sta=0.5,
                           lta=10,
                           trace_ids=trace_ids,
                           event_templates=event_templates,
                           similarity_threshold=similarity_thresholds)

pprint(trig)
Example #14
    def test_coincidence_trigger(self):
        """
        Test network coincidence trigger.
        """
        st = Stream()
        files = ["BW.UH1._.SHZ.D.2010.147.cut.slist.gz",
                 "BW.UH2._.SHZ.D.2010.147.cut.slist.gz",
                 "BW.UH3._.SHZ.D.2010.147.cut.slist.gz",
                 "BW.UH4._.EHZ.D.2010.147.cut.slist.gz"]
        for filename in files:
            filename = os.path.join(self.path, filename)
            st += read(filename)
        # some prefiltering used for UH network
        st.filter('bandpass', freqmin=10, freqmax=20)

        # 1. no weighting, no stations specified, good settings
        # => 3 events, no false triggers
        # for the first test we make some additional tests regarding types
        res = coincidence_trigger("recstalta", 3.5, 1, st.copy(), 3, sta=0.5,
                                  lta=10)
        self.assertTrue(isinstance(res, list))
        self.assertEqual(len(res), 3)
        expected_keys = ['time', 'coincidence_sum', 'duration', 'stations',
                         'trace_ids']
        expected_types = [UTCDateTime, float, float, list, list]
        for item in res:
            self.assertTrue(isinstance(item, dict))
            for key, _type in zip(expected_keys, expected_types):
                self.assertIn(key, item)
                self.assertTrue(isinstance(item[key], _type))
        self.assertGreater(res[0]['time'], UTCDateTime("2010-05-27T16:24:31"))
        self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
        self.assertTrue(4.2 < res[0]['duration'] < 4.8)
        self.assertEqual(res[0]['stations'], ['UH3', 'UH2', 'UH1', 'UH4'])
        self.assertEqual(res[0]['coincidence_sum'], 4)
        self.assertGreater(res[1]['time'], UTCDateTime("2010-05-27T16:26:59"))
        self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
        self.assertTrue(3.2 < res[1]['duration'] < 3.7)
        self.assertEqual(res[1]['stations'], ['UH2', 'UH3', 'UH1'])
        self.assertEqual(res[1]['coincidence_sum'], 3)
        self.assertGreater(res[2]['time'], UTCDateTime("2010-05-27T16:27:27"))
        self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
        self.assertTrue(4.2 < res[2]['duration'] < 4.4)
        self.assertEqual(res[2]['stations'], ['UH3', 'UH2', 'UH1', 'UH4'])
        self.assertEqual(res[2]['coincidence_sum'], 4)

        # 2. no weighting, station selection
        # => 2 events, no false triggers
        trace_ids = ['BW.UH1..SHZ', 'BW.UH3..SHZ', 'BW.UH4..EHZ']
        # raises "UserWarning: At least one trace's ID was not found"
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', UserWarning)
            re = coincidence_trigger("recstalta", 3.5, 1, st.copy(), 3,
                                     trace_ids=trace_ids, sta=0.5, lta=10)
            self.assertEqual(len(w), 1)
            self.assertIn("At least one trace's ID was not", str(w[0]))
        self.assertEqual(len(re), 2)
        self.assertGreater(re[0]['time'],
                           UTCDateTime("2010-05-27T16:24:31"))
        self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
        self.assertTrue(4.2 < re[0]['duration'] < 4.8)
        self.assertEqual(re[0]['stations'], ['UH3', 'UH1', 'UH4'])
        self.assertEqual(re[0]['coincidence_sum'], 3)
        self.assertGreater(re[1]['time'],
                           UTCDateTime("2010-05-27T16:27:27"))
        self.assertTrue(re[1]['time'] < UTCDateTime("2010-05-27T16:27:33"))
        self.assertTrue(4.2 < re[1]['duration'] < 4.4)
        self.assertEqual(re[1]['stations'], ['UH3', 'UH1', 'UH4'])
        self.assertEqual(re[1]['coincidence_sum'], 3)

        # 3. weighting, station selection
        # => 3 events, no false triggers
        trace_ids = {'BW.UH1..SHZ': 0.4, 'BW.UH2..SHZ': 0.35,
                     'BW.UH3..SHZ': 0.4, 'BW.UH4..EHZ': 0.25}
        res = coincidence_trigger("recstalta", 3.5, 1, st.copy(), 1.0,
                                  trace_ids=trace_ids, sta=0.5, lta=10)
        self.assertEqual(len(res), 3)
        self.assertGreater(res[0]['time'], UTCDateTime("2010-05-27T16:24:31"))
        self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
        self.assertTrue(4.2 < res[0]['duration'] < 4.8)
        self.assertEqual(res[0]['stations'], ['UH3', 'UH2', 'UH1', 'UH4'])
        self.assertEqual(res[0]['coincidence_sum'], 1.4)
        self.assertGreater(res[1]['time'], UTCDateTime("2010-05-27T16:26:59"))
        self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
        self.assertTrue(3.2 < res[1]['duration'] < 3.7)
        self.assertEqual(res[1]['stations'], ['UH2', 'UH3', 'UH1'])
        self.assertEqual(res[1]['coincidence_sum'], 1.15)
        self.assertGreater(res[2]['time'], UTCDateTime("2010-05-27T16:27:27"))
        self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
        self.assertTrue(4.2 < res[2]['duration'] < 4.4)
        self.assertEqual(res[2]['stations'], ['UH3', 'UH2', 'UH1', 'UH4'])
        self.assertEqual(res[2]['coincidence_sum'], 1.4)

        # 4. weighting, station selection, max_len
        # => 2 events, no false triggers, small event does not overlap anymore
        trace_ids = {'BW.UH1..SHZ': 0.6, 'BW.UH2..SHZ': 0.6}
        # raises "UserWarning: At least one trace's ID was not found"
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', UserWarning)
            re = coincidence_trigger("recstalta", 3.5, 1, st.copy(), 1.2,
                                     trace_ids=trace_ids,
                                     max_trigger_length=0.13, sta=0.5, lta=10)
            self.assertEqual(len(w), 2)
            self.assertIn("At least one trace's ID was not", str(w[0]))
            self.assertIn("At least one trace's ID was not", str(w[1]))
        self.assertEqual(len(re), 2)
        self.assertGreater(re[0]['time'],
                           UTCDateTime("2010-05-27T16:24:31"))
        self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
        self.assertTrue(0.2 < re[0]['duration'] < 0.3)
        self.assertEqual(re[0]['stations'], ['UH2', 'UH1'])
        self.assertEqual(re[0]['coincidence_sum'], 1.2)
        self.assertGreater(re[1]['time'],
                           UTCDateTime("2010-05-27T16:27:27"))
        self.assertTrue(re[1]['time'] < UTCDateTime("2010-05-27T16:27:33"))
        self.assertTrue(0.18 < re[1]['duration'] < 0.2)
        self.assertEqual(re[1]['stations'], ['UH2', 'UH1'])
        self.assertEqual(re[1]['coincidence_sum'], 1.2)

        # 5. station selection, extremely sensitive settings
        # => 4 events, 1 false trigger
        # raises "UserWarning: At least one trace's ID was not found"
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', UserWarning)
            res = coincidence_trigger("recstalta", 2.5, 1, st.copy(), 2,
                                      trace_ids=['BW.UH1..SHZ', 'BW.UH3..SHZ'],
                                      sta=0.3, lta=5)
            self.assertEqual(len(w), 2)
            self.assertIn("At least one trace's ID was not", str(w[0]))
            self.assertIn("At least one trace's ID was not", str(w[1]))
        self.assertEqual(len(res), 5)
        self.assertGreater(res[3]['time'], UTCDateTime("2010-05-27T16:27:01"))
        self.assertTrue(res[3]['time'] < UTCDateTime("2010-05-27T16:27:02"))
        self.assertTrue(1.5 < res[3]['duration'] < 1.7)
        self.assertEqual(res[3]['stations'], ['UH3', 'UH1'])
        self.assertEqual(res[3]['coincidence_sum'], 2.0)

        # 6. same as 5, gappy stream
        # => same as 5 (almost, duration of 1 event changes by 0.02s)
        st2 = st.copy()
        tr1 = st2.pop(0)
        t1 = tr1.stats.starttime
        t2 = tr1.stats.endtime
        td = t2 - t1
        tr1a = tr1.slice(starttime=t1, endtime=t1 + 0.45 * td)
        tr1b = tr1.slice(starttime=t1 + 0.6 * td, endtime=t1 + 0.94 * td)
        st2.insert(1, tr1a)
        st2.insert(3, tr1b)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', UserWarning)
            res = coincidence_trigger("recstalta", 2.5, 1, st2, 2,
                                      trace_ids=['BW.UH1..SHZ', 'BW.UH3..SHZ'],
                                      sta=0.3, lta=5)
            self.assertEqual(len(w), 2)
            self.assertIn("At least one trace's ID was not", str(w[0]))
            self.assertIn("At least one trace's ID was not", str(w[1]))
        self.assertEqual(len(res), 5)
        self.assertGreater(res[3]['time'], UTCDateTime("2010-05-27T16:27:01"))
        self.assertTrue(res[3]['time'] < UTCDateTime("2010-05-27T16:27:02"))
        self.assertTrue(1.5 < res[3]['duration'] < 1.7)
        self.assertEqual(res[3]['stations'], ['UH3', 'UH1'])
        self.assertEqual(res[3]['coincidence_sum'], 2.0)

        # 7. same as 3 but modify input trace ids and check output of trace_ids
        # and other additional information with ``details=True``
        st2 = st.copy()
        st2[0].stats.network = "XX"
        st2[1].stats.location = "99"
        st2[1].stats.network = ""
        st2[1].stats.channel = ""
        st2[2].stats.channel = "EHN"
        st2[3].stats.network = ""
        st2[3].stats.channel = ""
        st2[3].stats.station = ""
        trace_ids = {'XX.UH1..SHZ': 0.4, '.UH2.99.': 0.35,
                     'BW.UH3..EHN': 0.4, '...': 0.25}
        res = coincidence_trigger("recstalta", 3.5, 1, st2, 1.0,
                                  trace_ids=trace_ids, details=True,
                                  sta=0.5, lta=10)
        self.assertEqual(len(res), 3)
        self.assertGreater(res[0]['time'], UTCDateTime("2010-05-27T16:24:31"))
        self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
        self.assertTrue(4.2 < res[0]['duration'] < 4.8)
        self.assertEqual(res[0]['stations'], ['UH3', 'UH2', 'UH1', ''])
        self.assertEqual(res[0]['trace_ids'][0], st2[2].id)
        self.assertEqual(res[0]['trace_ids'][1], st2[1].id)
        self.assertEqual(res[0]['trace_ids'][2], st2[0].id)
        self.assertEqual(res[0]['trace_ids'][3], st2[3].id)
        self.assertEqual(res[0]['coincidence_sum'], 1.4)
        self.assertGreater(res[1]['time'], UTCDateTime("2010-05-27T16:26:59"))
        self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
        self.assertTrue(3.2 < res[1]['duration'] < 3.7)
        self.assertEqual(res[1]['stations'], ['UH2', 'UH3', 'UH1'])
        self.assertEqual(res[1]['trace_ids'][0], st2[1].id)
        self.assertEqual(res[1]['trace_ids'][1], st2[2].id)
        self.assertEqual(res[1]['trace_ids'][2], st2[0].id)
        self.assertEqual(res[1]['coincidence_sum'], 1.15)
        self.assertGreater(res[2]['time'], UTCDateTime("2010-05-27T16:27:27"))
        self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
        self.assertTrue(4.2 < res[2]['duration'] < 4.4)
        self.assertEqual(res[2]['stations'], ['UH3', 'UH2', 'UH1', ''])
        self.assertEqual(res[2]['trace_ids'][0], st2[2].id)
        self.assertEqual(res[2]['trace_ids'][1], st2[1].id)
        self.assertEqual(res[2]['trace_ids'][2], st2[0].id)
        self.assertEqual(res[2]['trace_ids'][3], st2[3].id)
        self.assertEqual(res[2]['coincidence_sum'], 1.4)
        expected_keys = ['cft_peak_wmean', 'cft_std_wmean', 'cft_peaks',
                         'cft_stds']
        expected_types = [float, float, list, list]
        for item in res:
            for key, _type in zip(expected_keys, expected_types):
                self.assertIn(key, item)
                self.assertTrue(isinstance(item[key], _type))
        # check some of the detailed info
        ev = res[-1]
        self.assertAlmostEqual(ev['cft_peak_wmean'], 18.101139518271076)
        self.assertAlmostEqual(ev['cft_std_wmean'], 4.800051726246676)
        self.assertAlmostEqual(ev['cft_peaks'][0], 18.985548683223936)
        self.assertAlmostEqual(ev['cft_peaks'][1], 16.852175794415011)
        self.assertAlmostEqual(ev['cft_peaks'][2], 18.64005853900883)
        self.assertAlmostEqual(ev['cft_peaks'][3], 17.572363634564621)
        self.assertAlmostEqual(ev['cft_stds'][0], 4.8909448258821362)
        self.assertAlmostEqual(ev['cft_stds'][1], 4.4446373508521804)
        self.assertAlmostEqual(ev['cft_stds'][2], 5.3499401252675964)
        self.assertAlmostEqual(ev['cft_stds'][3], 4.2723814539487703)
Example #15
def autodetect(st, freqmin, freqmax, tapering, sta, lta, thr_on, thr_off,
               min_num_stations, deadtime_between_coincidences, time_before,
               time_after, deadtime_after_pphase):
    """ 
  + RUN TRIGGER COINCIDENT IN A LONG TIME-WINDOW (take in consideration the lta parameter length)
  + IF ANY COINCIDENCE EXISTS, COMPUTE STA/LTA FOR P-PHASE AND S-PHASE IN A SHORT TIME WINDOW
  + THEN, RETURN DICTIONARY OF EVENTS IN HYPO71 FORMAT
  """

    # PRE-PROCESSING OF RAWDATA
    print(bcolors.BOLD + "\n+ Pre-processing rawdata..." + bcolors.ENDC)
    st.detrend("demean")
    st.taper(max_percentage=tapering, type="hann")
    st.merge(method=1, fill_value='interpolate')
    st.filter("bandpass", freqmin=freqmin, freqmax=freqmax)
    st.sort()

    # REMOVE TAPERED CORNERS
    for tr in st:
        dt = tapering * (tr.stats.endtime - tr.stats.starttime)
        tr.trim(tr.stats.starttime + dt, tr.stats.endtime - dt)

    # RUN COINCIDENCE TRIGGER (STA/LTA + CORRELATION)
    print(bcolors.BOLD + "\n+ Running trigger coincidence..." + bcolors.ENDC)
    st_z = st.copy().select(channel="*Z")
    output = trigger.coincidence_trigger("recstalta",
                                         thr_on=thr_on,
                                         thr_off=thr_off,
                                         stream=st_z,
                                         thr_coincidence_sum=min_num_stations,
                                         sta=sta,
                                         lta=lta,
                                         trigger_off_extension=0,
                                         similarity_threshold=0.7,
                                         details=True)

    # CREATE OUTPUT DICTIONARY OF EVENTS
    coincidences_dict = {}

    # LOOP OVER EACH EVENT (if anyone is found)
    timeX_old = None
    evnum = 1
    for event in output:
        timeX = event['time']
        traces = event['stations']

        # IF EVENT IS REPEATED, THEN CONTINUE (JUMP) TO THE NEXT ITERATION
        if timeX_old is not None and abs(timeX - timeX_old) < deadtime_between_coincidences:
            continue
        timeX_old = cp(timeX)

        # PRINT TRIGGER COINCIDENCE TIME
        print(bcolors.BOLD + "\n[%i] Coincident found at %s ..." %
              (evnum, timeX.strftime("%Y-%m-%d %H:%M:%S")) + bcolors.ENDC)

        # LOOP OVER EACH STATION + CUT SHORT SEGMENT
        picks_list = []
        for stname in traces:
            try:
                st_short = st.copy().select(station=stname)
                t1 = timeX - time_before
                t2 = timeX + time_after
                st_short.trim(t1, t2)

                ######## RUN STA/LTA FOR P-PHASE ########
                tr_z = st_short.select(channel="*Z")[0]
                cft = trigger.recursive_sta_lta(
                    tr_z.data, int(tr_z.stats.sampling_rate * sta),
                    int(tr_z.stats.sampling_rate * lta))
                on_off = trigger.trigger_onset(cft, thr_on, thr_off)
                #trigger.plot_trigger(tr_z, cft,thr_on, thr_off, show=True)
                if len(on_off) > 0:
                    weight = 0
                    if len(on_off) > 1:
                        weight = 1
                        #print(bcolors.WARNING + "(warning: more than one alert for P-phase at %s)"%(tr_z.stats.station) + bcolors.ENDC  )

                    # DEFINE ARRIVAL TIME OF THE P-PHASE AS THE FIRST TRIGGER FOUND
                    alert_P = tr_z.times("UTCDateTime")[on_off[0][0]]
                    alert_pattern = "%s P %.2f %i" % (
                        tr_z.stats.station, alert_P.timestamp, weight)

                    ######## RUN STA/LTA FOR S-PHASE (north component) ########
                    flag_Snorth = False
                    try:
                        tr_n = st_short.select(channel="*N")[0]
                        tr_n.trim(alert_P + deadtime_after_pphase,
                                  tr_n.stats.endtime)
                        cft_n = trigger.recursive_sta_lta(
                            tr_n.data, int(tr_n.stats.sampling_rate * sta),
                            int(tr_n.stats.sampling_rate * lta))
                        on_off_Sn = trigger.trigger_onset(
                            cft_n, thr_on, thr_off)
                        #trigger.plot_trigger(tr_n, cft_n,thr_on, thr_off, show=True)
                        if len(on_off_Sn) > 0:
                            flag_Snorth = True
                            weight_Sn = 0
                            if len(on_off_Sn) > 1:
                                weight_Sn = 1
                                #print(bcolors.WARNING + "(Warning: more than one alert for S-phase north component)" + bcolors.ENDC)

                            # DEFINE ARRIVAL TIME OF THE S-PHASE AS THE FIRST TRIGGER FOUND
                            alert_Sn = tr_n.times("UTCDateTime")[on_off_Sn[0][0]]

                    except Exception:
                        continue

                    ######## RUN STA/LTA FOR S-PHASE (east component) ########
                    flag_Seast = False
                    try:
                        tr_e = st_short.select(channel="*E")[0]
                        tr_e.trim(alert_P + deadtime_after_pphase,
                                  tr_e.stats.endtime)
                        cft_e = trigger.recursive_sta_lta(
                            tr_e.data, int(tr_e.stats.sampling_rate * sta),
                            int(tr_e.stats.sampling_rate * lta))
                        on_off_Se = trigger.trigger_onset(
                            cft_e, thr_on, thr_off)
                        #trigger.plot_trigger(tr_e, cft_e,thr_on, thr_off, show=True)
                        if len(on_off_Se) > 0:
                            flag_Seast = True
                            weight_Se = 0
                            if len(on_off_Se) > 1:
                                weight_Se = 1
                                #print(bcolors.WARNING + "(Warning: more than one alert for S-phase east component)" + bcolors.ENDC)

                            # DEFINE ARRIVAL TIME OF THE S-PHASE AS THE FIRST TRIGGER FOUND
                            alert_Se = tr_e.times("UTCDateTime")[on_off_Se[0][0]]

                    except Exception:
                        continue

                    # DEFINE ARRIVAL TIME OF THE S-PHASE BETWEEN THE NORTH AND EAST COMPONENT
                    if flag_Snorth and flag_Seast:
                        if weight_Sn < weight_Se:
                            if alert_Sn - alert_P <= deadtime_after_pphase and alert_Sn - alert_P > 0:
                                alert_S = cp(alert_Sn)
                                weight_S = cp(weight_Sn)
                                alert_pattern += " S %.2f %i" % (
                                    alert_S.timestamp, weight_S)

                        elif weight_Sn > weight_Se:
                            if alert_Se - alert_P <= deadtime_after_pphase and alert_Se - alert_P > 0:
                                alert_S = cp(alert_Se)
                                weight_S = cp(weight_Se)
                                alert_pattern += " S %.2f %i" % (
                                    alert_S.timestamp, weight_S)

                        else:
                            timediff = abs(alert_Sn - alert_Se)
                            if timediff <= 3:
                                if alert_Sn <= alert_Se:
                                    alert_S = alert_Sn + timediff / 2.
                                else:
                                    alert_S = alert_Sn - timediff / 2.
                                weight_S = cp(weight_Sn)
                                alert_pattern += " S %.2f %i" % (
                                    alert_S.timestamp, weight_S)

                    elif flag_Snorth and not flag_Seast:
                        if alert_Sn - alert_P <= deadtime_after_pphase and alert_Sn - alert_P > 0:
                            alert_S = cp(alert_Sn)
                            alert_pattern += " S %.2f %i" % (alert_S.timestamp,
                                                             weight_Sn)

                    elif not flag_Snorth and flag_Seast:
                        if alert_Se - alert_P <= deadtime_after_pphase and alert_Se - alert_P > 0:
                            alert_S = cp(alert_Se)
                            alert_pattern += " S %.2f %i" % (alert_S.timestamp,
                                                             weight_Se)

                    # PRINT OUTPUT
                    print(" " * 4 + bcolors.OKGREEN + alert_pattern +
                          bcolors.ENDC)
                    picks_list.append(alert_pattern)

            except Exception:
                continue

        # APPEND PICKS TO DICTIONARY
        evname = "Event_%03i" % (evnum)
        coincidences_dict.update({evname: cp(picks_list)})
        evnum += 1

    return coincidences_dict
Example #16
stations = ["AIGLE", "SENIN", "DIX", "LAUCH", "MMK", "SIMPL"]
st = Stream()

for station in stations:
    try:
        tmp = client.get_waveforms("CH", station, "", "[EH]HZ", t, t2,
                                   metadata=True)
    except Exception:
        print(station, "---")
        continue
    st += tmp

st.taper(max_percentage=0.05)  # newer ObsPy requires an explicit taper fraction; 0.05 assumed
st.filter("bandpass", freqmin=1, freqmax=20)
triglist = coincidence_trigger("recstalta", 10, 2, st, 4, sta=0.5, lta=10)
print(len(triglist), "events triggered.")

for trig in triglist:
    # first station in the trigger, used as a proxy for the closest station
    closest_sta = trig['stations'][0]
    tr = st.select(station=closest_sta)[0]
    trig['latitude'] = tr.stats.coordinates.latitude
    trig['longitude'] = tr.stats.coordinates.longitude

paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
          'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]}

for trig in triglist:
    t = trig['time']
    print("#" * 80)
    print("Trigger time:", t)
Example #17
    def test_correlate_stream_template_and_correlation_detector(self):
        template = read().filter('highpass', freq=5).normalize()
        pick = UTCDateTime('2009-08-24T00:20:07.73')
        template.trim(pick, pick + 10)
        n1 = len(template[0])
        n2 = 100 * 3600  # 1 hour of data at 100 Hz sampling rate
        dt = template[0].stats.delta
        # shift one template Trace
        template[1].stats.starttime += 5
        stream = template.copy()
        np.random.seed(42)
        for tr, trt in zip(stream, template):
            tr.stats.starttime += 24 * 3600
            tr.data = np.random.random(n2) - 0.5  # noise
            if tr.stats.channel[-1] == 'Z':
                tr.data[n1:2*n1] += 10 * trt.data
                tr.data = tr.data[:-n1]
            tr.data[5*n1:6*n1] += 100 * trt.data
            tr.data[20*n1:21*n1] += 2 * trt.data
        # make one template trace a bit shorter
        template[2].data = template[2].data[:-n1 // 5]
        # make two stream traces a bit shorter
        stream[0].trim(5, None)
        stream[1].trim(1, 20)
        # second template
        pick2 = stream[0].stats.starttime + 20 * n1 * dt
        template2 = stream.slice(pick2 - 5, pick2 + 5)
        # test cross correlation
        stream_orig = stream.copy()
        template_orig = template.copy()
        ccs = correlate_stream_template(stream, template)
        self.assertEqual(len(ccs), len(stream))
        self.assertEqual(stream[1].stats.starttime, ccs[0].stats.starttime)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test if traces with not matching seed ids are discarded
        ccs = correlate_stream_template(stream[:2], template[1:])
        self.assertEqual(len(ccs), 1)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test template_time parameter
        ccs1 = correlate_stream_template(stream, template)
        template_time = template[0].stats.starttime + 100
        ccs2 = correlate_stream_template(stream, template,
                                         template_time=template_time)
        self.assertEqual(len(ccs2), len(ccs1))
        delta = ccs2[0].stats.starttime - ccs1[0].stats.starttime
        self.assertAlmostEqual(delta, 100)
        # test if all three events found
        detections, sims = correlation_detector(stream, template, 0.2, 30)
        self.assertEqual(len(detections), 3)
        dtime = pick + n1 * dt + 24 * 3600
        self.assertAlmostEqual(detections[0]['time'], dtime)
        self.assertEqual(len(sims), 1)
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test if xcorr stream is suitable for coincidence_trigger
        # result should be the same, return values related
        ccs = correlate_stream_template(stream, template)
        triggers = coincidence_trigger(None, 0.2, -1, ccs, 2,
                                       max_trigger_length=30, details=True)
        self.assertEqual(len(triggers), 2)
        for d, t in zip(detections[1:], triggers):
            self.assertAlmostEqual(np.mean(t['cft_peaks']), d['similarity'])
        # test template_magnitudes
        detections, _ = correlation_detector(stream, template, 0.2, 30,
                                             template_magnitudes=1)
        self.assertAlmostEqual(detections[1]['amplitude_ratio'], 100, delta=1)
        self.assertAlmostEqual(detections[1]['magnitude'], 1 + 8 / 3,
                               delta=0.01)
        self.assertAlmostEqual(detections[2]['amplitude_ratio'], 2, delta=2)
        detections, _ = correlation_detector(stream, template, 0.2, 30,
                                             template_magnitudes=True)
        self.assertAlmostEqual(detections[1]['amplitude_ratio'], 100, delta=1)
        self.assertNotIn('magnitude', detections[1])
        self.assertEqual(stream_orig, stream)
        self.assertEqual(template_orig, template)
        # test template names
        detections, _ = correlation_detector(stream, template, 0.2, 30,
                                             template_names='eq')
        self.assertEqual(detections[0]['template_name'], 'eq')
        detections, _ = correlation_detector(stream, template, 0.2, 30,
                                             template_names=['eq'], plot=True)
        self.assertEqual(detections[0]['template_name'], 'eq')
        # test similarity parameter with additional constraints
        # test details=True

        def simf(ccs):
            ccmatrix = np.array([tr.data for tr in ccs])
            comp_thres = np.sum(ccmatrix > 0.2, axis=0) > 1
            similarity = ccs[0].copy()
            similarity.data = np.mean(ccmatrix, axis=0) * comp_thres
            return similarity
        detections, _ = correlation_detector(stream, template, 0.1, 30,
                                             similarity_func=simf,
                                             details=True)
        self.assertEqual(len(detections), 2)
        for d in detections:
            self.assertAlmostEqual(np.mean(list(d['cc_values'].values())),
                                   d['similarity'])
        # test if properties from find_peaks function are returned
        detections, sims = correlation_detector(stream, template, 0.1, 30,
                                                threshold=0.16, details=True,
                                                similarity_func=simf)
        try:
            from scipy.signal import find_peaks  # noqa
        except ImportError:
            self.assertEqual(len(detections), 2)
            self.assertNotIn('left_threshold', detections[0])
        else:
            self.assertEqual(len(detections), 1)
            self.assertIn('left_threshold', detections[0])
        # also check the _find_peaks function
        distance = int(round(30 / sims[0].stats.delta))
        indices = _find_peaks(sims[0].data, 0.1, distance, distance)
        self.assertEqual(len(indices), 2)
        # test distance parameter
        detections, _ = correlation_detector(stream, template, 0.2, 500)
        self.assertEqual(len(detections), 1)
        # test more than one template
        # just 2 detections for first template, because second template has
        # a higher similarity for third detection
        templates = (template, template2)
        templatetime2 = pick2 - 10
        template_times = (template[0].stats.starttime, templatetime2)
        detections, _ = correlation_detector(stream, templates, (0.2, 0.3), 30,
                                             plot=stream,
                                             template_times=template_times,
                                             template_magnitudes=(2, 5))
        self.assertGreater(len(detections), 0)
        self.assertIn('template_id', detections[0])
        detections0 = [d for d in detections if d['template_id'] == 0]
        self.assertEqual(len(detections0), 2)
        self.assertEqual(len(detections), 3)
        self.assertAlmostEqual(detections[2]['similarity'], 1)
        self.assertAlmostEqual(detections[2]['magnitude'], 5)
        self.assertEqual(detections[2]['time'], templatetime2)
        # test if everything is correct if template2 and stream do not have
        # any ids in common
        templates = (template, template2[2:])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            detections, sims = correlation_detector(
                stream[:1], templates, 0.2, 30, plot=True,
                template_times=templatetime2, template_magnitudes=2)
        detections0 = [d for d in detections if d['template_id'] == 0]
        self.assertEqual(len(detections0), 3)
        self.assertEqual(len(detections), 3)
        self.assertEqual(len(sims), 2)
        self.assertIsInstance(sims[0], Trace)
        self.assertIs(sims[1], None)
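
A minimal standalone sketch of the API this test exercises: cut a template out of a stream and let correlation_detector find it again. The 0.5 similarity threshold and the 10 s minimum peak distance are illustrative values, not taken from the test.

from obspy import read, UTCDateTime
from obspy.signal.cross_correlation import correlation_detector

stream = read()  # example event bundled with ObsPy
pick = UTCDateTime('2009-08-24T00:20:07.73')
template = stream.copy().trim(pick, pick + 10)
# the template is cut from the stream itself, so it detects itself with
# similarity close to 1 at the pick time
detections, sims = correlation_detector(stream, template, 0.5, 10)
for det in detections:
    print(det['time'], det['similarity'])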
Example #18

# Define min and max dates for which to run the trigger routine
mindate = mpdates.date2num(dt.datetime.strptime('20011119', '%Y%m%d'))
maxdate = mpdates.date2num(dt.datetime.strptime('20011120', '%Y%m%d'))
x = range(int(mindate), int(maxdate))
days = [mpdates.num2date(xd) for xd in x]

# Create a list of lists of daily coincidence triggers
coinc_triggers = []
for day in days:
    print(day.strftime('%Y-%m-%d'))
    st = read_data_from_wav_paths(day, data_paths)
    # Run coincidence trigger for each day
    day_coinc_triggers = coincidence_trigger(trigger_type="recstalta", thr_on=3.5,
                                             thr_off=3.0, stream=st,
                                             sta=0.3, lta=10, thr_coincidence_sum=5,
                                             details=True, delete_long_trigger=False)
    # Don't append an empty list if there were no triggers on that day
    if day_coinc_triggers:
        print('Adding triggers...')
        coinc_triggers.append(day_coinc_triggers)
# Now that the daily coincidence triggers are created, go through them and
# write MiniSEED files of all the available waveforms around each trigger time
for day_trig in coinc_triggers:
    day = day_trig[-1]['time']
    all_wav_files = []
    for j in range(len(data_paths)):
        all_wav_files += glob.glob(data_paths[j]+'/'+day.strftime('Y%Y/R%j.01') +
                                   '/*' + day.strftime('%Y.%j'))
    all_wav_files.sort()
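    # Hypothetical continuation: slice a window around each trigger and write
    # one MiniSEED file per event; the 10 s / 120 s padding and the output
    # naming are assumptions (requires obspy's read/Stream imports upstream).
    for trig in day_trig:
        t0 = trig['time']
        ev_st = Stream()
        for wav_file in all_wav_files:
            ev_st += read(wav_file, starttime=t0 - 10, endtime=t0 + 120)
        ev_st.merge(method=1, fill_value=0)  # close gaps before writing
        ev_st.write(t0.strftime('%Y%m%dT%H%M%S') + '.ms', format='MSEED')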
Example #19
 def test_coincidence_trigger_with_similarity_checking(self):
     """
     Test network coincidence trigger with cross correlation similarity
     checking of given event templates.
     """
     st = Stream()
     files = ["BW.UH1._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH2._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH3._.SHZ.D.2010.147.cut.slist.gz",
              "BW.UH3._.SHN.D.2010.147.cut.slist.gz",
              "BW.UH3._.SHE.D.2010.147.cut.slist.gz",
              "BW.UH4._.EHZ.D.2010.147.cut.slist.gz"]
     for filename in files:
         filename = os.path.join(self.path, filename)
         st += read(filename)
     # some prefiltering used for UH network
     st.filter('bandpass', freqmin=10, freqmax=20)
     # set up template event streams
     times = ["2010-05-27T16:24:33.095000", "2010-05-27T16:27:30.370000"]
     templ = {}
     for t in times:
         t = UTCDateTime(t)
         st_ = st.select(station="UH3").slice(t, t + 2.5).copy()
         templ.setdefault("UH3", []).append(st_)
     times = ["2010-05-27T16:27:30.574999"]
     for t in times:
         t = UTCDateTime(t)
         st_ = st.select(station="UH1").slice(t, t + 2.5).copy()
         templ.setdefault("UH1", []).append(st_)
     # add another template with different SEED ID, it should be ignored
     # (this can happen when using many templates over a long time period
     # and instrument changes over time)
     st_ = st_.copy()
     for tr in st_:
         tr.stats.channel = 'X' + tr.stats.channel[1:]
     templ['UH1'].insert(0, st_)
     trace_ids = {"BW.UH1..SHZ": 1,
                  "BW.UH2..SHZ": 1,
                  "BW.UH3..SHZ": 1,
                  "BW.UH4..EHZ": 1}
     similarity_thresholds = {"UH1": 0.8, "UH3": 0.7}
     with warnings.catch_warnings(record=True) as w:
         # avoid getting influenced by the warning filters getting set up
         # differently in obspy-runtests.
         # (e.g. depending on options "-v" and "-q")
         warnings.resetwarnings()
         trig = coincidence_trigger(
             "classicstalta", 5, 1, st.copy(), 4, sta=0.5, lta=10,
             trace_ids=trace_ids, event_templates=templ,
             similarity_threshold=similarity_thresholds)
     # four warnings get raised
     self.assertEqual(len(w), 4)
     self.assertEqual(
         str(w[0].message),
         "At least one trace's ID was not found in the trace ID list and "
         "was disregarded (BW.UH3..SHN)")
     self.assertEqual(
         str(w[1].message),
         "At least one trace's ID was not found in the trace ID list and "
         "was disregarded (BW.UH3..SHE)")
     self.assertEqual(
         str(w[2].message),
         'Skipping trace BW.UH1..XHZ in template correlation (not present '
         'in stream to check).')
     self.assertEqual(
         str(w[3].message),
         "Skipping template(s) for station 'UH1': No common SEED IDs when "
         "comparing template (BW.UH1..XHZ) and data streams (BW.UH1..SHZ, "
         "BW.UH2..SHZ, BW.UH3..SHE, BW.UH3..SHN, BW.UH3..SHZ, "
         "BW.UH4..EHZ).")
     # check floats in resulting dictionary separately
     self.assertAlmostEqual(trig[0].pop('duration'), 3.96, places=6)
     self.assertAlmostEqual(trig[1].pop('duration'), 1.99, places=6)
     self.assertAlmostEqual(trig[2].pop('duration'), 1.92, places=6)
     self.assertAlmostEqual(trig[3].pop('duration'), 3.92, places=6)
     self.assertAlmostEqual(trig[0]['similarity'].pop('UH1'),
                            0.94149447384, places=6)
     self.assertAlmostEqual(trig[0]['similarity'].pop('UH3'), 1,
                            places=6)
     self.assertAlmostEqual(trig[1]['similarity'].pop('UH1'),
                            0.65228204570, places=6)
     self.assertAlmostEqual(trig[1]['similarity'].pop('UH3'),
                            0.72679293429, places=6)
     self.assertAlmostEqual(trig[2]['similarity'].pop('UH1'),
                            0.89404458774, places=6)
     self.assertAlmostEqual(trig[2]['similarity'].pop('UH3'),
                            0.74581409371, places=6)
     self.assertAlmostEqual(trig[3]['similarity'].pop('UH1'), 1,
                            places=6)
     self.assertAlmostEqual(trig[3]['similarity'].pop('UH3'), 1,
                            places=6)
     remaining_results = \
         [{'coincidence_sum': 4.0,
           'similarity': {},
           'stations': ['UH3', 'UH2', 'UH1', 'UH4'],
           'time': UTCDateTime(2010, 5, 27, 16, 24, 33, 210000),
           'trace_ids': ['BW.UH3..SHZ', 'BW.UH2..SHZ', 'BW.UH1..SHZ',
                         'BW.UH4..EHZ']},
          {'coincidence_sum': 3.0,
           'similarity': {},
           'stations': ['UH3', 'UH1', 'UH2'],
           'time': UTCDateTime(2010, 5, 27, 16, 25, 26, 710000),
           'trace_ids': ['BW.UH3..SHZ', 'BW.UH1..SHZ', 'BW.UH2..SHZ']},
          {'coincidence_sum': 3.0,
           'similarity': {},
           'stations': ['UH2', 'UH1', 'UH3'],
           'time': UTCDateTime(2010, 5, 27, 16, 27, 2, 260000),
           'trace_ids': ['BW.UH2..SHZ', 'BW.UH1..SHZ', 'BW.UH3..SHZ']},
          {'coincidence_sum': 4.0,
           'similarity': {},
           'stations': ['UH3', 'UH2', 'UH1', 'UH4'],
           'time': UTCDateTime(2010, 5, 27, 16, 27, 30, 510000),
           'trace_ids': ['BW.UH3..SHZ', 'BW.UH2..SHZ', 'BW.UH1..SHZ',
                         'BW.UH4..EHZ']}]
     self.assertEqual(trig, remaining_results)
Example #20
                        # Run the coincidence trigger on a single CC trace
                        # resulting from the stacked CFTs.
                        # Essential cross-correlation threshold parameters:
                        xcor_cut = thresholdd
                        thr_on = thresholdd
                        thr_off = 0.85 * thresholdd
                        thr_coincidence_sum = 1.0
                        similarity_thresholds = {"BH": thr_on}
                        trigger_type = None
                        triglist = coincidence_trigger(
                            trigger_type,
                            thr_on,
                            thr_off,
                            stcc,
                            thr_coincidence_sum,
                            trace_ids=None,
                            similarity_threshold=similarity_thresholds,
                            delete_long_trigger=False,
                            trigger_off_extension=3.0,
                            details=True,
                        )
                        ntrig = len(triglist)

                        tt = np.empty(ntrig)
                        cs = np.empty(ntrig)
                        nch = np.empty(ntrig)
                        cft_ave = np.empty(ntrig)
                        crt = np.empty(ntrig)
                        cft_ave_trg = np.empty(ntrig)
                        crt_trg = np.empty(ntrig)
                        nch3 = np.empty(ntrig)
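                        # One plausible way (an assumption, not the original
                        # code) to fill these arrays from the details=True
                        # trigger dictionaries; crt/cft_ave_trg/nch3 are left
                        # to the project-specific code that follows.
                        for k, trg in enumerate(triglist):
                            tt[k] = trg['time'].timestamp      # trigger time
                            cs[k] = trg['coincidence_sum']     # coincidence sum
                            nch[k] = len(trg['trace_ids'])     # channels involved
                            cft_ave[k] = np.mean(trg['cft_peaks'])  # mean CC peak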
Example #21
def trigger(st, stC, rtable, opt):

    """
    Run triggering algorithm on a stream of data.

    st: OBSPy stream of data
    rtable: Repeater table contains reference time of previous trigger in samples
    opt: Options object describing station/run parameters

    Returns triggered traces as OBSPy trace object updates ptime for next run 
    """
    
    tr = st[0]
    t = tr.stats.starttime

    cft = coincidence_trigger(opt.trigalg, opt.trigon, opt.trigoff, stC, opt.nstaC,
        sta=opt.swin, lta=opt.lwin, details=True)
            
    if len(cft) > 0:
        
        ind = 0
        
        # Slice out the data from st and save the maximum STA/LTA ratio value for
        # use in orphan expiration
        
        # Convert ptime from time of last trigger to seconds before start time
        if rtable.attrs.ptime:
            ptime = (UTCDateTime(rtable.attrs.ptime) - t)
        else:
            ptime = -opt.mintrig
                
        for n in range(len(cft)):
                    
            ttime = cft[n]['time'] # This is a UTCDateTime, not samples
            
            if (ttime >= t + opt.atrig) and (ttime >= t + ptime +
                opt.mintrig) and (ttime < t + len(tr.data)/opt.samprate -
                2*opt.atrig):
                
                ptime = ttime - t
                
                # Cut out and append all data to first trace              
                tmp = st.slice(ttime - opt.ptrig, ttime + opt.atrig)
                ttmp = tmp.copy()
                ttmp = ttmp.trim(ttime - opt.ptrig, ttime + opt.atrig + 0.05, pad=True,
                    fill_value=0)
                ttmp[0].data = ttmp[0].data[0:opt.wshape] - np.mean(
                    ttmp[0].data[0:opt.wshape])
                for s in range(1,len(ttmp)):
                    ttmp[0].data = np.append(ttmp[0].data, ttmp[s].data[
                        0:opt.wshape] - np.mean(ttmp[s].data[0:opt.wshape]))
                ttmp[0].stats.maxratio = np.max(cft[n]['cft_peaks'])
                if ind == 0:
                    trigs = Stream(ttmp[0])
                    ind += 1
                else:
                    trigs.append(ttmp[0])

        if ind == 0:
            return []
        else:
            rtable.attrs.ptime = (t + ptime).isoformat()
            return trigs
    else:
        return []
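
The Options object is project-specific and not shown here; a hypothetical stand-in listing only the fields this function actually reads (all numeric values are illustrative assumptions):

from types import SimpleNamespace

opt = SimpleNamespace(
    trigalg='recstalta',      # trigger algorithm passed to coincidence_trigger
    trigon=3.0, trigoff=1.5,  # STA/LTA on/off thresholds
    swin=0.7, lwin=7.0,       # STA and LTA window lengths [s]
    nstaC=3,                  # traces required for a coincidence
    mintrig=10.0,             # minimum spacing between triggers [s]
    ptrig=10.0, atrig=20.0,   # pre-/post-trigger padding [s]
    samprate=100.0,           # sampling rate [Hz]
    wshape=3000)              # samples kept per channel window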
Example #22
trace_ids = {'YN.QIJ.00.BHZ': 1}
similarity_thresholds = {"QIJ": 0.6}
event_templates = []
event_templates.append(st_)
'''
trig = templates_max_similarity(st2, st[0].stats['starttime'], event_templates)
print(trig)
'''
trig = coincidence_trigger("classicstalta",
                           4,
                           0.5,
                           st,
                           1,
                           sta=0.4,
                           lta=5,
                           trace_ids=trace_ids,
                           event_templates=event_templates,
                           details=True,
                           delete_long_trigger=True,
                           trigger_off_extension=5,
                           similarity_threshold=similarity_thresholds)
'''
trig = coincidence_trigger("classicstalta", 10, 1, st2, 1, sta=0.2, lta=5, 
                          trace_ids=trace_ids,
                          event_templates=event_templates,details=True,
                          delete_long_trigger=True, 
                          trigger_off_extension=5,
                          similarity_threshold=similarity_thresholds)
'''
#pprint(trig)
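
A short sketch of inspecting the result: with details=True each entry carries the trigger time and duration, and because event_templates was passed, a per-station 'similarity' mapping.

for t in trig:
    print(t['time'], '%.2f s' % t['duration'], t['stations'], t['similarity'])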