Example #1
File: testing.py Project: sgjholt/seismo
from obspy.signal.trigger import ar_pick


def PS_AR_Picker(st, flag, lower_F, upper_F):
    lower_F = float(lower_F)
    upper_F = float(upper_F)
    
    group_size = int(len(st) / 6)  # number of stations in each group
                                   # (EW1, EW2, etc.)
    half_group = int(len(st) / 2)

    
    st.taper(max_percentage=0.1)
    st.filter('bandpass', freqmin=lower_F, freqmax=upper_F,
              corners=2, zerophase=True)

    if flag == 'Downhole':     
        for i in range(0, group_size):
            tr1 = st[i]
            tr2 = st[i+group_size]
            tr3 = st[i+group_size*2]

            df = tr1.stats.sampling_rate
            p_pick, s_pick = ar_pick(tr1.data, tr2.data, tr3.data, df,
                                     1.0, 20.0, 1.0, 0.1, 4.0, 1.0,
                                     2, 8, 0.1, 0.2)
            print('{} s {} s from station {}'.format(p_pick, s_pick, i + 1))
    elif flag == 'Surface':
        for i in range(0, group_size):
            tr1 = st[i]
            tr2 = st[half_group+i+group_size] 
            tr3 = st[half_group+i+group_size*2]

            df = tr1.stats.sampling_rate
            p_pick, s_pick = ar_pick(tr1.data, tr2.data, tr3.data, df,
                                     1.0, 20.0, 1.0, 0.1, 4.0, 1.0,
                                     2, 8, 0.1, 0.2)
            print('{} s {} s from station {}'.format(p_pick, s_pick,
                                                     i + half_group + 1))
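For context, a minimal self-contained sketch of the same ar_pick call; the synthetic arrays and the 100 Hz sampling rate are placeholders, not data from the project above:

import numpy as np
from obspy.signal.trigger import ar_pick

df = 100.0  # assumed sampling rate in Hz
# Random placeholders standing in for real Z/N/E waveforms
z = np.random.randn(6000).astype(np.float32)
n = np.random.randn(6000).astype(np.float32)
e = np.random.randn(6000).astype(np.float32)

# Same parameter set used throughout these examples
p_pick, s_pick = ar_pick(z, n, e, df,
                         1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
print('{} s {} s'.format(p_pick, s_pick))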
Example #2
    def test_ar_pick(self):
        """
        Test ar_pick against implementation for UNESCO short course
        """
        data = []
        for channel in ['z', 'n', 'e']:
            file = os.path.join(self.path,
                                'loc_RJOB20050801145719850.' + channel)
            data.append(np.loadtxt(file, dtype=np.float32))
        # some default arguments
        samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s = \
            200.0, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2
        ptime, stime = ar_pick(data[0], data[1], data[2], samp_rate, f1, f2,
                               lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s)
        self.assertAlmostEqual(ptime, 30.6350002289)
        # seems to be strongly machine dependent, go for int for 64 bit
        # self.assertEqual(int(stime + 0.5), 31)
        self.assertAlmostEqual(stime, 31.165, delta=0.05)

        # All three arrays must have the same length, otherwise an error is
        # raised.
        with self.assertRaises(ValueError) as err:
            ar_pick(data[0], data[1], np.zeros(1), samp_rate, f1, f2, lta_p,
                    sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s)
        self.assertEqual(err.exception.args[0],
                         "All three data arrays must have the same length.")
Example #3
File: test_trigger.py Project: Brtle/obspy
    def test_ar_pick(self):
        """
        Test ar_pick against implementation for UNESCO short course
        """
        data = []
        for channel in ['z', 'n', 'e']:
            file = os.path.join(self.path,
                                'loc_RJOB20050801145719850.' + channel)
            data.append(np.loadtxt(file, dtype=np.float32))
        # some default arguments
        samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s = \
            200.0, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2
        ptime, stime = ar_pick(data[0], data[1], data[2], samp_rate, f1, f2,
                               lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s)
        self.assertAlmostEqual(ptime, 30.6350002289)
        # seems to be strongly machine dependent, go for int for 64 bit
        # self.assertEqual(int(stime + 0.5), 31)
        self.assertAlmostEqual(stime, 31.165, delta=0.05)

        # All three arrays must have the same length, otherwise an error is
        # raised.
        with self.assertRaises(ValueError) as err:
            ar_pick(data[0], data[1], np.zeros(1), samp_rate, f1, f2, lta_p,
                    sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s)
        self.assertEqual(err.exception.args[0],
                         "All three data arrays must have the same length.")
Example #4

from obspy import read
from obspy.signal.trigger import ar_pick, recursive_sta_lta, trigger_onset

# d_model and r_model are trained classifiers loaded elsewhere in the project.


def predictinput(sacfile, algo):
    #print(input_path)
    bhnfile = read("/home/shilpa/Desktop/earthquakeproject/files/" + sacfile)
    bhn_tr = bhnfile[0]
    df = bhn_tr.stats.sampling_rate
    bhn_trigger = recursive_sta_lta(bhn_tr.data, int(5 * df), int(10 * df))
    bhnonoff = trigger_onset(bhn_trigger, 1.2, 0.5)
    # NOTE: the single available component is passed for all three inputs
    p_pick, s_pick = ar_pick(bhnfile[0].data, bhnfile[0].data, bhnfile[0].data,
                             df, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
    # Feature vector: [200, P pick, S pick, trigger duration in samples]
    data = [[200, p_pick, s_pick,
             int(bhnonoff[0][1]) - int(bhnonoff[0][0])]]
    print(data)
    if algo == "decisiontree":
        prediction = d_model.predict(data)
    elif algo == "randomforest":
        prediction = r_model.predict(data)
    elif algo == "svm":
        prediction = r_model.predict(data)
    result = " "
    if (prediction[0] == 1):
        result = "an Earthquake"
    else:
        result = "No Earthquake"
    return result
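A hypothetical call; the SAC file name is an assumption, and the hard-coded data directory and the trained d_model/r_model objects must already exist:

result = predictinput("quake.sac", "randomforest")  # "quake.sac" is assumed
print("Prediction: " + result)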
Example #5
def save_velocity_data(infile):
    try:
        st = unpack(infile)
        my_st = st.copy()
        my_st.integrate()
        my_st.filter("highpass", freq=0.075)

        p_pick, s_pick = ar_pick(my_st[0].data, my_st[1].data, my_st[2].data,
                                 my_st[0].stats.sampling_rate, 1.0, 20.0, 1.0,
                                 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
        # convert the pick from seconds to a sample index (100 Hz data assumed)
        p_arrival = int(p_pick * 100)
        # print(p_arrival)

        snr = get_snr(my_st, p_arrival)
        pgv, intensity = calc_pgv_intensity(my_st)

        infile = infile.split('/')[-1]
        outname = "_".join([
            infile[:-4], my_st[0].stats.station,
            str(intensity),
            str(pgv),
            str(snr)
        ])
        plot_vel_waveform(my_st, p_arrival, outname)
        sample = 100
        E, N, Z = get_p_arrival_data(my_st, p_arrival, sample, outname)

        if not os.path.exists("vel_joblib"):
            os.makedirs("vel_joblib")
        outname = "vel_joblib/" + outname + ".joblib"
        joblib.dump([snr, pgv, intensity, E, N, Z], outname)
    except Exception as e:
        print(e)
Example #6
def predict(dtfl, ev_list, dataOperator):
    for c, evi in enumerate(ev_list):
        try:
            if c % 1000 == 0:
                print(c)
            dataset = dtfl.get('data/'+str(evi))
            data = np.array(dataset)

            pre_E = trigger_onset(recursive_sta_lta(
                data[:, 0], config['sta_window'], config['lta_window']), config['on_trigger'], config['off_trigger'])
            pre_N = trigger_onset(recursive_sta_lta(
                data[:, 1], config['sta_window'], config['lta_window']), config['on_trigger'], config['off_trigger'])
            # pre_Z = trigger_onset(recursive_sta_lta(
            #     data[:, 2], config['sta_window'], config['lta_window']), config['on_trigger'], config['off_trigger'])

            N_end_time, E_end_time = 6000, 6000
            if len(pre_E) == 0 and len(pre_N) == 0:
                dataOperator.data_writer(dataset.attrs['trace_name'], dataset.attrs['p_arrival_sample'],
                                        dataset.attrs['s_arrival_sample'], dataset.attrs['coda_end_sample'], 
                                        -1, -1, -1, dataset.attrs['trace_category'], "noise")
                continue

            if dataset.attrs['trace_category'] == 'noise':
                dataOperator.data_writer(dataset.attrs['trace_name'], dataset.attrs['p_arrival_sample'],
                                        dataset.attrs['s_arrival_sample'], dataset.attrs['coda_end_sample'], 
                                        -1, -1, -1, dataset.attrs['trace_category'], "earthquake_local") 
                continue           

            if len(pre_E):
                E_end_time = pre_E[-1][1]

            if len(pre_N):
                N_end_time = pre_N[-1][1]

            end_time = (E_end_time + N_end_time) / 2

            # ar_pick returns times in seconds; convert to samples at 100 Hz
            p_pick, s_pick = ar_pick(data[:, 0], data[:, 1], data[:, 2], 100,
                                     1.0, 20.0, 1.0, 0.1, 4.0, 1.0,
                                     2, 8, 0.1, 0.2)
            p_pick, s_pick = p_pick * 100, s_pick * 100

            # y_true = [float(dataset.attrs['p_arrival_sample']),
            #           float(dataset.attrs['s_arrival_sample']), float(dataset.attrs['coda_end_sample'][0][0])]
            # y_pred = [p_pick, s_pick, end_time]

            # p_true = np.zeros(shape=(6000,))
            # p_true[p_pick-20:p_pick+21] = 1

            # a = np.array(y_true)
            # b = np.array(y_pred)
            # print(a * b)
            # break
            dataOperator.data_writer(dataset.attrs['trace_name'], dataset.attrs['p_arrival_sample'],
                                    dataset.attrs['s_arrival_sample'], dataset.attrs['coda_end_sample'], 
                                    int(p_pick), int(s_pick), int(end_time), dataset.attrs['trace_category'], "earthquake_local")
        except Exception:
            continue
    return
Example #7
def _doubleChecking(station_list, detections, preprocessed_dir, moving_window, thr_on=3.7, thr_of=0.5):
    """Perform traditional detection (STA/LTA) and picking (AIC) to double-check
    for events on the remaining stations when an event has been detected on more
    than two stations."""
    for stt in station_list:
        sttt = stt.split('_')[0]
      #  print(sttt)
        if sttt not in detections['station'].to_list():
            new_picks = {}                    
            # os.path.join handles the path separator on every platform
            file_name = os.path.join(preprocessed_dir, sttt + ".hdf5")
            file_csv = os.path.join(preprocessed_dir, sttt + ".csv")
            
            df = pd.read_csv(file_csv)
            df['start_time'] = pd.to_datetime(df['start_time'])  
            
            event_start = detections.iloc[0]['event_start_time']
            mask = ((df['start_time'] > event_start - timedelta(seconds=moving_window)) &
                    (df['start_time'] < event_start + timedelta(seconds=moving_window)))
            df = df.loc[mask]
            dtfl = h5py.File(file_name, 'r')
            dataset = dtfl.get('data/'+df['trace_name'].to_list()[0]) 
            data = np.array(dataset)
                
            cft = recursive_sta_lta(data[:,2], int(2.5 * 100), int(10. * 100))
            on_of = trigger_onset(cft, thr_on, thr_of)
            if len(on_of) >= 1:                    
                p_pick, s_pick = ar_pick(data[:,2], data[:,1], data[:,0], 100, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
                if (on_of[0][1]+100)/100 > p_pick > (on_of[0][0]-100)/100: 
                   # print('got one')
                    new_picks['traceID'] = df['trace_name'].to_list()[0]
                    new_picks['network'] = dataset.attrs["network_code"]
                    new_picks['station'] = sttt
                    new_picks['instrument_type'] = df['trace_name'].to_list()[0].split('_')[2]
                    new_picks['stlat'] = round(dataset.attrs["receiver_latitude"], 4)
                    new_picks['stlon'] = round(dataset.attrs["receiver_longitude"], 4)
                    new_picks['stelv'] = round(dataset.attrs["receiver_elevation_m"], 2)
                    trace_start = UTCDateTime(
                        dataset.attrs['trace_start_time'].replace(' ', 'T') + 'Z')
                    new_picks['event_start_time'] = (trace_start + on_of[0][0] / 100).datetime
                    new_picks['event_end_time'] = (trace_start + on_of[0][1] / 100).datetime
                    new_picks['detection_prob'] = 0.3
                    new_picks['detection_unc'] = 0.6
                    new_picks['p_arrival_time'] = (trace_start + p_pick).datetime
                    new_picks['p_prob'] = 0.3
                    new_picks['p_unc'] = 0.6
                    new_picks['p_snr'] = None
                    new_picks['s_arrival_time'] = None
                    new_picks['s_prob'] = 0.0
                    new_picks['s_unc'] = None
                    new_picks['s_snr'] = None
                    new_picks['amp'] = None
                    # DataFrame.append was removed in pandas 2.0; use concat
                    detections = pd.concat([detections, pd.DataFrame([new_picks])],
                                           ignore_index=True)
    return detections                    
Example #8
    def test_ar_pick(self):
        """
        Test ar_pick against implementation for UNESCO short course
        """
        data = []
        for channel in ['z', 'n', 'e']:
            file = os.path.join(self.path,
                                'loc_RJOB20050801145719850.' + channel)
            data.append(np.loadtxt(file, dtype=np.float32))
        # some default arguments
        samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s = \
            200.0, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2
        ptime, stime = ar_pick(data[0], data[1], data[2], samp_rate, f1, f2,
                               lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s)
        self.assertAlmostEqual(ptime, 30.6350002289)
        # seems to be strongly machine dependent, go for int for 64 bit
        # self.assertAlmostEqual(stime, 31.2800006866)
        self.assertEqual(int(stime + 0.5), 31)
Example #9
def pick_ar(stream, picker_config=None, config=None):
    """Wrapper around the AR P-phase picker.

    Args:
        stream (StationStream):
            Stream containing waveforms that need to be picked.
        picker_config (dict):
            Dictionary with parameters for AR P-phase picker. See picker.yml.
        config (dict):
            Configuration dictionary. Key value here is:
                windows:
                    window_checks:
                        min_noise_duration
    Returns:
        tuple:
            - Best estimate for p-wave arrival time (s since start of trace).
            - Mean signal to noise ratio based on the pick.
    """
    if picker_config is None:
        picker_config = get_config(section='pickers')
    if config is None:
        config = get_config()
    min_noise_dur = config['windows']['window_checks']['min_noise_duration']
    params = picker_config['ar']
    # Get the east, north, and vertical components from the stream
    st_e = stream.select(channel='??[E1]')
    st_n = stream.select(channel='??[N2]')
    st_z = stream.select(channel='??[Z3]')

    # Check if we found one of each component
    # If not, use the next picker in the order of preference
    if len(st_e) != 1 or len(st_n) != 1 or len(st_z) != 1:
        raise BaseException('Unable to perform AR picker.')

    minloc = ar_pick(st_z[0].data, st_n[0].data, st_e[0].data,
                     st_z[0].stats.sampling_rate,
                     **params)[0]
    if minloc < min_noise_dur:
        fmt = 'Noise window (%.1f s) less than minimum (%.1f)'
        tpl = (minloc, min_noise_dur)
        raise ValueError(fmt % tpl)
    mean_snr = calc_snr(stream, minloc)

    return (minloc, mean_snr)
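A sketch of how this wrapper might be invoked, assuming a StationStream named stream that contains exactly one east, north, and vertical trace (construction of the stream is not shown):

try:
    p_arrival, mean_snr = pick_ar(stream)
    print('P arrival at %.2f s, mean SNR %.1f' % (p_arrival, mean_snr))
except BaseException as exc:  # this version raises BaseException/ValueError
    print('AR picker failed: %s' % exc)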
Example #10

def pick_ar(stream, picker_config=None, config=None):
    """Wrapper around the AR P-phase picker.

    Args:
        stream (StationStream):
            Stream containing waveforms that need to be picked.
        picker_config (dict):
            Dictionary with parameters for AR P-phase picker. See picker.yml.
        config (dict):
            Configuration dictionary. Key value here is:
                windows:
                    window_checks:
                        min_noise_duration
    Returns:
        tuple:
            - Best estimate for p-wave arrival time (s since start of trace).
            - Mean signal to noise ratio based on the pick.
    """
    if picker_config is None:
        picker_config = get_config(section='pickers')
    if config is None:
        config = get_config()
    min_noise_dur = config['windows']['window_checks']['min_noise_duration']
    params = picker_config['ar']
    # Get the east, north, and vertical components from the stream
    st_e = stream.select(channel='??[E1]')
    st_n = stream.select(channel='??[N2]')
    st_z = stream.select(channel='??[Z3]')

    # Check if we found one of each component
    # If not, use the next picker in the order of preference
    if len(st_e) != 1 or len(st_n) != 1 or len(st_z) != 1:
        raise GMProcessException('Unable to perform AR picker.')

    minloc = ar_pick(st_z[0].data, st_n[0].data, st_e[0].data,
                     st_z[0].stats.sampling_rate,
                     **params)[0]
    if minloc < min_noise_dur:
        fmt = 'Noise window (%.1f s) less than minimum (%.1f)'
        tpl = (minloc, min_noise_dur)
        raise GMProcessException(fmt % tpl)
    mean_snr = calc_snr(stream, minloc)

    return (minloc, mean_snr)
Example #11
    def test_ar_pick_low_amplitude(self):
        """
        Test ar_pick with low amplitude data
        """
        data = []
        for channel in ['z', 'n', 'e']:
            file = os.path.join(self.path,
                                'loc_RJOB20050801145719850.' + channel)
            data.append(np.loadtxt(file, dtype=np.float32))

        # artificially reduce signal amplitude
        for d in data:
            d /= 10.0 * d.max()

        # some default arguments
        samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s = \
            200.0, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2
        ptime, stime = ar_pick(data[0], data[1], data[2], samp_rate, f1, f2,
                               lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s)
        self.assertAlmostEqual(ptime, 30.6350002289)
        # seems to be strongly machine dependent, go for int for 64 bit
        # self.assertAlmostEqual(stime, 31.2800006866)
        self.assertEqual(int(stime + 0.5), 31)
Example #12
File: test_trigger.py Project: Brtle/obspy
    def test_ar_pick_low_amplitude(self):
        """
        Test ar_pick with low amplitude data
        """
        data = []
        for channel in ['z', 'n', 'e']:
            file = os.path.join(self.path,
                                'loc_RJOB20050801145719850.' + channel)
            data.append(np.loadtxt(file, dtype=np.float32))

        # artificially reduce signal amplitude
        for d in data:
            d /= 10.0 * d.max()

        # some default arguments
        samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s = \
            200.0, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2
        ptime, stime = ar_pick(data[0], data[1], data[2], samp_rate, f1, f2,
                               lta_p, sta_p, lta_s, sta_s, m_p, m_s, l_p, l_s)
        self.assertAlmostEqual(ptime, 30.6350002289)
        # seems to be strongly machine dependent, go for int for 64 bit
        # self.assertAlmostEqual(stime, 31.2800006866)
        self.assertEqual(int(stime + 0.5), 31)
Example #13
    def AR_AIC(self, dataset, batch_size=100):
        """
        AR_AIC method
        :param dataset: Dataset name.
        :param batch_size: Model directory name.
        :return:
        """
        dataset_path, eval_path = self.get_eval_dir(dataset)

        dataset = seisnn.io.read_dataset(dataset)
        data_len = self.get_dataset_length(self.database)
        progbar = tf.keras.utils.Progbar(data_len)
        df = 100
        n = 0
        for val in dataset.prefetch(100).batch(batch_size):
            progbar.add(batch_size)
            title = f"eval_{n:0>5}"
            trace_len = val['trace'].shape[2]
            batch_len = val['trace'].shape[0]
            for i in range(batch_len):
                x_z = val['trace'].numpy()[i, :, :, 0]
                x_n = val['trace'].numpy()[i, :, :, 1]
                x_e = val['trace'].numpy()[i, :, :, 2]
                p_pick, s_pick = ar_pick(a=x_z,
                                         b=x_n,
                                         c=x_e,
                                         samp_rate=df,
                                         f1=1.0,
                                         f2=45,
                                         lta_p=2,
                                         sta_p=0.3,
                                         lta_s=2,
                                         sta_s=0.3,
                                         m_p=2,
                                         m_s=8,
                                         l_p=0.1,
                                         l_s=0.2)
Example #14
    for sta in trigger['stations']:
        st_trig = st_cp.select(station=sta).copy()
        # Limiting time span for picking
        st_trig = st_trig.trim(t-30, t+120)
        for trc in st_trig:
            start = trc.stats.starttime
            stn = trc.stats.station  # renamed to avoid shadowing the loop variable
            loc = trc.stats.location
            chn = trc.stats.channel  
            inst = trc.stats.sac.kinst.replace(" ", "_")      

            createFolder('../event/%s'%(start.strftime('%Y.%m.%d.%H.%M.%S')))
            trc.write('../event/%s/%s.%s.%s.%s.R.sac'
                      % (start.strftime('%Y.%m.%d.%H.%M.%S'),
                         start.strftime('%Y.%m.%d.%H.%M.%S'), stn, loc, chn),
                      format='SAC')

        st_trig.detrend(type='demean')
        # df was undefined in this fragment; the traces' sampling rate is assumed
        df = st_trig[0].stats.sampling_rate
        pick = ar_pick(st_trig[2].data, st_trig[1].data, st_trig[0].data, df,
                       1, 20, 1, 0.1, 4, 1, 2, 8, 0.1, 0.35)
        # Frequency of the lower bandpass window = 1
        # Frequency of the upper bandpass window = 20
        # Length of LTA for the P arrival in seconds = 1s
        # Length of STA for the P arrival in seconds = 0.1s
        # Length of LTA for the S arrival in seconds = 4s
        # Length of STA for the S arrival in seconds = 1 s
        # Number of AR coefficients for the P arrival = 2
        # Number of AR coefficients for the S arrival = 8
        # Length of variance window for the P arrival in seconds = 0.1
        # Length of variance window for the S arrival in seconds = 0.35
        pick_P = st_trig[2].stats.starttime + pick[0]
        pick_S = st_trig[2].stats.starttime + pick[1]

        # Calculate maximum amplitude for P phase from Z component only, trim waveform from pick P and next 0.5 second
        ampl_P = max(abs(st_trig[2].copy().trim(pick_P, pick_P + 0.5).data))
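Since ObsPy's ar_pick exposes these as named parameters (see Example #13), the values documented above can also be passed as keywords; a sketch with placeholder traces:

import numpy as np
from obspy.signal.trigger import ar_pick

z = np.random.randn(6000).astype(np.float32)  # placeholder Z/N/E data
n = np.random.randn(6000).astype(np.float32)
e = np.random.randn(6000).astype(np.float32)

p_pick, s_pick = ar_pick(a=z, b=n, c=e, samp_rate=100.0,
                         f1=1.0, f2=20.0,       # bandpass corners (Hz)
                         lta_p=1.0, sta_p=0.1,  # P LTA/STA lengths (s)
                         lta_s=4.0, sta_s=1.0,  # S LTA/STA lengths (s)
                         m_p=2, m_s=8,          # AR coefficients for P/S
                         l_p=0.1, l_s=0.35)     # variance window lengths (s)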
Example #15
def signal_split(st,
                 event_time=None,
                 event_lon=None,
                 event_lat=None,
                 method='velocity',
                 vsplit=7.0,
                 picker_config=None):
    """
    This method tries to identify the boundary between the noise and signal
    for the waveform. The split time is placed inside the
    'processing_parameters' key of the trace stats.

    If split_method is 'velocity', then the split between the noise and signal
    windows is approximated as the arrival time of a phase with velocity equal
    to vsplit.

    If split_method is equal to 'p_arrival', then the P-wave arrival is
    used as the split between the noise and signal windows. Multiple picker
    methods are supported and can be configured in the config file
    '~/.gmprocess/picker.yml'.

    Args:
        st (StationStream):
            Stream of data.
        event_time (UTCDateTime):
            Event origin time.
        event_lon (float):
            Event longitude.
        event_lat (float):
            Event latitude.
        method (str):
            Method for splitting noise and signal windows. Either 'p_arrival'
            or 'velocity'.
        vsplit (float):
            Velocity (km/s) for splitting noise and signal.
        picker_config (dict):
            Dictionary with parameters for the phase pickers. See picker.yml.

    Returns:
        trace with stats dict updated to include a
        stats['processing_parameters']['signal_split'] dictionary.
    """
    if picker_config is None:
        picker_config = get_config(picker=True)

    if method == 'p_arrival':
        preferred_picker = picker_config['order_of_preference'][0]

        if preferred_picker == 'ar':
            # Get the east, north, and vertical components from the stream
            st_e = st.select(channel='??[E1]')
            st_n = st.select(channel='??[N2]')
            st_z = st.select(channel='??[Z3]')

            # Check if we found one of each component
            # If not, use the next picker in the order of preference
            if len(st_e) != 1 or len(st_n) != 1 or len(st_z) != 1:
                logging.warning('Unable to perform AR picker.')
                logging.warning('Using next available phase picker.')
                preferred_picker = picker_config['order_of_preference'][1]
            else:
                tdiff = ar_pick(st_z[0].data, st_n[0].data, st_e[0].data,
                                st_z[0].stats.sampling_rate,
                                **picker_config['ar'])[0]
                tsplit = st[0].stats.starttime + tdiff

        if preferred_picker in ['baer', 'cwb']:
            tdiffs = []
            for tr in st:
                if preferred_picker == 'baer':
                    pick_sample = pk_baer(tr.data, tr.stats.sampling_rate,
                                          **picker_config['baer'])[0]
                    tr_tdiff = pick_sample * tr.stats.delta
                else:
                    tr_tdiff = PowerPicker(tr)[0] - tr.stats.starttime
                tdiffs.append(tr_tdiff)
            tdiff = min(tdiffs)
            tsplit = st[0].stats.starttime + tdiff

        if preferred_picker not in ['ar', 'baer', 'cwb']:
            raise ValueError('Not a valid picker.')

    elif method == 'velocity':
        epi_dist = gps2dist_azimuth(
            lat1=event_lat,
            lon1=event_lon,
            lat2=st[0].stats['coordinates']['latitude'],
            lon2=st[0].stats['coordinates']['longitude'])[0] * M_TO_KM
        tsplit = event_time + epi_dist / vsplit
        preferred_picker = None
    else:
        raise ValueError('Split method must be "p_arrival" or "velocity"')

    if tsplit >= st[0].times('utcdatetime')[0]:
        # Update trace params
        split_params = {
            'split_time': tsplit,
            'method': method,
            'vsplit': vsplit,
            'picker_type': preferred_picker
        }
        for tr in st:
            tr.setParameter('signal_split', split_params)

    return st
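A sketch of the default 'velocity' path, assuming a gmprocess StationStream st whose traces carry coordinates; the event values are placeholders, not data from this project:

from obspy import UTCDateTime

st = signal_split(st,
                  event_time=UTCDateTime('2019-07-06T03:19:53Z'),
                  event_lon=-117.599,
                  event_lat=35.770,
                  method='velocity',
                  vsplit=7.0)
print(st[0].getParameter('signal_split'))  # assumes gmprocess StationTrace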
Example #16

                #print(file[0:-4]+" Starttime:"+onofftime[i]+" endtime:"+onofftime[i+1])

                # NOTE: the original fragment never reads the BHN trace used
                # below; BHNfile is an assumed, parallel file name.
                tr_bhn = read(BHNfile,
                              starttime=UTCDateTime(onofftime[i]),
                              endtime=UTCDateTime(onofftime[i + 1]))
                tr_bhe = read(BHEfile,
                              starttime=UTCDateTime(onofftime[i]),
                              endtime=UTCDateTime(onofftime[i + 1]))
                tr_bhz = read(BHZfile,
                              starttime=UTCDateTime(onofftime[i]),
                              endtime=UTCDateTime(onofftime[i + 1]))
                #print("finished reading")

                station = tr_bhz[0].stats.station

                df = tr_bhz[0].stats.sampling_rate
                #print("finished reading df")
                p_pick, s_pick = ar_pick(tr_bhz[0].data, tr_bhn[0].data,
                                         tr_bhe[0].data, df, 1.0, 20.0, 1.0,
                                         0.1, 4.0, 1.0, 2, 8, 0.1, 0.2, True)
                #print("finished reading ar piicker")
                print(file[0:-4] + "p_pick: " + str(p_pick) + " s_pick: " +
                      str(s_pick) + " Of slice: " + str(j))
                #tr_bhz.plot()
                del tr_bhz
                del tr_bhn
                del tr_bhe

                p_pick_actual = convertformat(p_pick,
                                              UTCDateTime(onofftime[i]))
                s_pick_actual = convertformat(s_pick,
                                              UTCDateTime(onofftime[i]))

            except IndexError:
Example #17

import numpy as np
import datetime
from obspy.core import read
from obspy.signal.trigger import ar_pick
from obspy.taup import TauPyModel

for i in range(1, 31):
    tr1 = read('/Users/Nishita/Documents/Research/example30/*' + str(i) +
               '*BHZ.SAC')[0]
    tr2 = read('/Users/Nishita/Documents/Research/example30/*' + str(i) +
               '*BHN.SAC')[0]
    tr3 = read('/Users/Nishita/Documents/Research/example30/*' + str(i) +
               '*BHE.SAC')[0]

    df = tr1.stats.sampling_rate
    p_pick, s_pick = ar_pick(tr1.data, tr2.data, tr3.data, df, 1.0, 20.0, 1.0,
                             0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
    data = read('/Users/Nishita/Documents/Research/example30/*' + str(i) +
                '*BHZ.SAC')
    # Trace start time, e.g. UTCDateTime(2008, 7, 30, 16, 0, 1, 320000)
    ti = data[0].stats.starttime
    # Convert the UTC time to a Unix timestamp in seconds, e.g. 1217404801.32
    ti_unix = float(ti.strftime("%s.%f"))
    # For example, if the aftershock is found at the 67321st data point,
    # the standard format is:
    # (8 * 3600 shifts the Unix timestamp by eight hours, a local-time offset)
    p_pick_time = float(
        datetime.datetime.fromtimestamp(ti_unix + 8 * 3600 +
                                        p_pick).strftime('%Y%m%d%H%M%S.%f'))
    s_pick_time = float(
        datetime.datetime.fromtimestamp(ti_unix + 8 * 3600 +
                                        s_pick).strftime('%Y%m%d%H%M%S.%f'))
    p_time = round(p_pick_time, 2)
    s_time = round(s_pick_time, 2)
Example #18

                #print(file[0:-4]+" Starttime:"+onofftime[i]+" endtime:"+onofftime[i+1])

                # NOTE: as in Example #16, the BHN read is missing from the
                # original fragment; BHNfile is an assumed, parallel file name.
                tr_bhn = read(BHNfile,
                              starttime=UTCDateTime(onofftime[i]),
                              endtime=UTCDateTime(onofftime[i + 1]))
                tr_bhe = read(BHEfile,
                              starttime=UTCDateTime(onofftime[i]),
                              endtime=UTCDateTime(onofftime[i + 1]))
                tr_bhz = read(BHZfile,
                              starttime=UTCDateTime(onofftime[i]),
                              endtime=UTCDateTime(onofftime[i + 1]))
                #print("finished reading")

                station = tr_bhz[0].stats.station

                df = tr_bhz[0].stats.sampling_rate
                #print("finished reading df")
                p_pick, s_pick = ar_pick(tr_bhz[0].data, tr_bhn[0].data,
                                         tr_bhe[0].data, df, 1.0, 20.0, 0.2,
                                         0.05, 0.4, 0.1, 3, 8, 0.1, 0.2, True)
                #print("finished reading ar piicker")
                print(file[0:-4] + "p_pick: " + str(p_pick) + " s_pick: " +
                      str(s_pick) + " Of slice: " + str(j))
                tr_bhz.plot()
                del tr_bhz
                del tr_bhn
                del tr_bhe

                p_pick_actual = convertformat(p_pick,
                                              UTCDateTime(onofftime[i]))
                s_pick_actual = convertformat(s_pick,
                                              UTCDateTime(onofftime[i]))

            except IndexError:
Example #19

                      starttime=(dtn + onsettime[0] / 100 - 10),
                      endtime=(dtn + onsettime[1] / 100 + 10))[0]
        #print("aahhh2"+str(onsettime[0])+str(onsettime[1]))
        tr_bhe = read('/Volumes/Seagate Expansion Drive/after/*' + '.2008' +
                      str(i) + '*.BHE*',
                      starttime=(dte + onsettime[0] / 100 - 10),
                      endtime=(dte + onsettime[1] / 100 + 10))[0]
        #print("aahhh3"+str(onsettime[0])+str(onsettime[1]))
        tr_bhz = read('/Volumes/Seagate Expansion Drive/after/*' + '.2008' +
                      str(i) + '*.BHZ*',
                      starttime=(dtz + onsettime[0] / 100 - 10),
                      endtime=(dtz + onsettime[1] / 100 + 10))[0]
        #print("aahhh4"+str(onsettime[0])+str(onsettime[1]))
        onsettime[:] = []
        df = tr_bhz.stats.sampling_rate
        p_pick, s_pick = ar_pick(tr_bhz.data, tr_bhn.data, tr_bhe.data, df,
                                 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
        print("p_pick: " + str(p_pick) + " s_pick: " + str(s_pick) +
              " Of slice: " + str(j))
        tr_bhz.plot(type='relative')
        del tr_bhn
        del tr_bhe
        del tr_bhz
    #tr.plot(type='relative')
    #plt.plot(cft, 'k')
    #plt.show()
    #plot_trigger(tr_n, cftn, 2, 0.5)

    #------------------BHE------------------------------------

    dfe = tr_e.stats.sampling_rate
Example #20

              if len(pstimes) <= 1:
                  continue
              if pstimes[1] - pstimes[0] > 0:
                  Samp = pswaves[0]

              else:
                  Samp = pswaves[1]
  
                  
# ------------------------------------------------------------------------------------------------------------------------                 
          df = tr.stats.sampling_rate
          BAp_pick, phase_info = pk_baer(tr.data[startwave:endwave], df,
                                         20, 60, 7.0, 12.0, 100, 100)

          ARp_pick, S_pick = ar_pick(tr.data[startwave:endwave],
                                     tr.data[startwave:endwave],
                                     tr.data[startwave:endwave], df,
                                     1.0, 20.0, 1.0, 0.1, 4.0, 1.0,
                                     2, 8, 0.1, 0.2, s_pick=True)
          
          Bap = int(startwave + BAp_pick)
          Arp = int(startwave + ARp_pick * 100)
          Pwave = round((Bap + Arp) / 2)
          Swave = int(startwave + S_pick * 100)
          # P-wave amplitude
          Pamp = abs(trdata[Arp])

      
          plt.figure(figNumber)
          plt.plot(trdata[startwave:endwave], 'b',
                   trdata[startwave:Swave], 'y',
                   trdata[startwave:Pwave], 'r')

          plt.xlabel('Centiseconds of Event')
          plt.ylabel('Count')
          EventName = "Event " + str(figNumber)