Example #1
def _checkSTALTA(st, filt, STATime, LTATime, limit):
    """
    Take a stream and make sure it's vert. component (or first comp
    if no vert) does not exceed limit given STATime and LTATime
    Return True if passes, false if fails
    """
    if limit is None:
        return True
    if len(st) < 1:
        return None
    try:
        stz = st.select(component='Z')[0]
    except IndexError:  # no Z component found in the stream
        return None
    if len(stz) < 1:
        stz = st[0]
    sz = stz.copy()
    sr = sz.stats.sampling_rate
    ltaSamps = int(LTATime * sr)
    staSamps = int(STATime * sr)
    cft = classic_sta_lta(sz.data, staSamps, ltaSamps)
    if np.max(cft) <= limit:
        return True
    else:
        sta = sz.stats.station
        t1 = sz.stats.starttime
        t2 = sz.stats.endtime
        msg = ('%s fails sta/lta req of %d between %s and %s' %
               (sta, limit, t1, t2))
        detex.log(__name__, msg, level='warn')
        return False
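A minimal usage sketch of the same kind of check, run on ObsPy's bundled demo stream; the 1 s STA / 10 s LTA windows and the 8.0 limit are illustrative values, not detex defaults:

import numpy as np
from obspy import read
from obspy.signal.trigger import classic_sta_lta

st = read()  # ObsPy's bundled demo stream
tr = st.select(component='Z')[0]
sr = tr.stats.sampling_rate
cft = classic_sta_lta(tr.data, int(1.0 * sr), int(10.0 * sr))
print('max STA/LTA:', np.max(cft), '- passes limit 8.0:', np.max(cft) <= 8.0)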
Example #2
    def save_triggers(self):

        # TODO: save into a folder whose name includes the trigger value

        trig_traces = []
        cfts = []
        df = pd.DataFrame(columns=["Trace number", "Trigger"])

        for i, tr in enumerate(self.dataset):
            trig = 0
            cft = classic_sta_lta(tr, int(5 * self.fs), int(10 * self.fs))
            cfts.append(cft)
            print(i)

            if np.max(cft) > self.thresh:
                trig_traces.append(tr)
                self.plot_trace(tr, i)
                trig = 1

            df.loc[i] = [i, trig]

        os.makedirs("STA-LTA-Triggers", exist_ok=True)
        df.to_csv(f"STA-LTA-Triggers/{self.dataset_name}_{self.thresh}.csv",
                  index=False)

        trig_traces = np.asarray(trig_traces)

        os.makedirs("Data/Trigger", exist_ok=True)
        np.save(f"Data/Trigger/{self.dataset_name}_{self.thresh}.npy",
                trig_traces)

        return np.asarray(cfts)
Example #3
def check_sta_lta(st, sta_length=1.0, lta_length=20.0, threshold=5.0):
    '''
    Checks that the maximum STA/LTA ratio for AT LEAST ONE of the stream's
    traces is above a certain threshold.

    Args:
        st (obspy.core.stream.Stream):
            Stream of data.
        sta_length (float):
            Length of time window for STA (seconds).
        lta_length (float):
            Length of time window for LTA (seconds).
        threshold (float):
            Required maximum STA/LTA ratio to pass the test.

    Returns:
        Stream that has been checked for sta/lta requirements.
    '''
    if not st.passed:
        return st

    for tr in st:
        sr = tr.stats.sampling_rate
        nlta = int(lta_length * sr) + 1
        if len(tr) >= nlta:
            sta_lta = classic_sta_lta(tr.data, int(sta_length * sr) + 1, nlta)
            if max(sta_lta) < threshold:
                tr.fail('Failed sta/lta check because threshold sta/lta '
                        'is not exceeded.')
        else:
            tr.fail('Failed sta/lta check because record length is shorter '
                    'than lta length.')

    return st
Example #4
def check_sta_lta(tr, sta_length=1.0, lta_length=20.0, threshold=5.0):
    '''
    Checks that the maximum STA/LTA ratio of the trace is above a certain
    threshold.

    Args:
        tr (obspy.core.trace.Trace): Trace of strong motion data.
        sta_length (float): Length of time window for STA (seconds).
        lta_length (float): Length of time window for LTA (seconds).
        threshold (float): Required maximum STA/LTA ratio to pass the test.

    Returns:
        bool: True if trace passes the check. False otherwise.
    '''

    sta_lta_params = {
        'sta_length': sta_length,
        'lta_length': lta_length,
        'sta_lta_threshold': threshold
    }
    tr = _update_params(tr, 'sta_lta', sta_lta_params)
    df = tr.stats.sampling_rate
    sta_lta = classic_sta_lta(tr.data, int(sta_length * df),
                              int(lta_length * df))
    return max(sta_lta) >= threshold
Example #5
 def trigger(self, data):
     # Keep the last nlta samples as context, then append the new chunk.
     self.buf = np.append(self.buf[-self.nlta:], data)
     if self.buf.size > self.nlta:
         # Return only the part of the CF that corresponds to the new chunk.
         out = classic_sta_lta(self.buf, self.nsta, self.nlta)[-data.size:]
     else:
         out = np.require(np.zeros(data.size), dtype='float')
     return out
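A rough self-check of the chunked buffer trick above, assuming a stand-in class with the same trigger method (the nsta/nlta values are arbitrary):

import numpy as np
from obspy.signal.trigger import classic_sta_lta

class StreamingStaLta:
    """Stand-in wrapper mirroring the trigger() method above."""

    def __init__(self, nsta, nlta):
        self.nsta, self.nlta = nsta, nlta
        self.buf = np.zeros(0)

    def trigger(self, data):
        self.buf = np.append(self.buf[-self.nlta:], data)
        if self.buf.size > self.nlta:
            return classic_sta_lta(self.buf, self.nsta, self.nlta)[-data.size:]
        return np.zeros(data.size)

picker = StreamingStaLta(nsta=20, nlta=100)
x = np.random.randn(1000)
cf = np.concatenate([picker.trigger(chunk) for chunk in np.split(x, 10)])
print(cf.shape)  # (1000,)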
Example #6
def prefgen_vector(dataset, fbs, dt=2, nspad=200, hwin=150, vlen=10):
    iprint = 0
    feature_matrix = np.zeros((len(dataset), (9 * (vlen + 1)) + 1))
    for trcid, trc in enumerate(dataset):
        fb = fbs[trcid]
        # Pad the start of the trace with low-amplitude random noise.
        pad = np.random.rand(nspad) / 100
        trc_norm = trc / np.amax(np.abs(trc))
        trc_norm_padded = np.hstack((pad, trc_norm))
        # Index of the first break in the padded trace; every feature is
        # sampled on the (vlen + 1)-sample window ending at the first break.
        idx = int(nspad + fb / dt)
        window = slice(idx - vlen, idx + 1)
        trc_entropy = entropy(trc_norm_padded, 50)
        trc_fdm = fdm(trc_norm_padded, 50, np.arange(1, 4), 15)
        trc_slta = trigger.classic_sta_lta(trc_norm_padded, 2, 100)
        trc_fq_win_sum = fq_win_sum(trc_norm_padded, hwin, dt)
        ftrc = np.concatenate([
            trc_norm_padded[window],
            norm(np.gradient(np.abs(trc_norm_padded)))[window],
            norm(trc_entropy)[window],
            norm(np.gradient(trc_entropy))[window],
            norm(trc_fdm)[window],
            norm(np.gradient(trc_fdm))[window],
            norm(trc_slta)[window],
            norm(trc_fq_win_sum)[window],
            norm(np.gradient(trc_fq_win_sum))[window],
            [1],  # class label: window ends at a true first break
        ])
        feature_matrix[trcid, :] = ftrc
        if trcid == iprint:
            print('Feature vector for trace No.', trcid, 'completed')
            iprint += 100
    return feature_matrix
Example #7
def compute_sta_lta(data,
                    fm,
                    trigger_type,
                    nlta=10.0,
                    nsta=5.0,
                    trig_on=1.2,
                    trig_off=0.5):
    """
    Function that handles the building of STA/LTA event picking: classic, recursive and delayed. It follows Obspy
    implementation of these algorithms and can be interfaced with the main GUI to plot the results, or with the
    CLI to other analysis routines. A detailed comparison of STA/LTA techniques algorithms are included in:

    Withers, M., Aster, R., Young, C., Beiriger, J., Harris, M., Moore, S., and Trujillo, J. (1998),
    A comparison of select trigger algorithms for automated global seismic phase and event detection,
    Bulletin of the Seismological Society of America, 88 (1), 95-106.
    http://www.bssaonline.org/content/88/1/95.abstract

    Args:
        data : Numpy Array
            The seismic data we want to apply our STA/LTA routine
        fm : float
            The sampling frequency of the main trace
        trigger_type : str
            A string identifiying which trigger type we want (Recursive, Delayed, Classic)
        nlta : float
            Length of the Long Time Average Window (LTA)
        nsta : float
            Length of the Short Time Average Window (STA)
        trig_on : float
            Value of the CF to consider as an activation trigger
        trig_off : float
            Value of the CF to consider as a de-activation trigger
    Returns:
        cft: Numpy Array
            The characteristic function result of the
        on_of: Tuple
            A data tuple containing the on/ofs times of the even picking
    """

    if np.isnan(data).any():
        data = merge_numpy(data)
    try:

        if trigger_type == "Recursive":
            cft = recursive_sta_lta(data, int(nsta * fm), int(nlta * fm))
        elif trigger_type == "Delayed":
            cft = delayed_sta_lta(data, int(nsta * fm), int(nlta * fm))
        else:
            cft = classic_sta_lta(data, int(nsta * fm), int(nlta * fm))

        on_of = trigger_onset(cft, trig_on, trig_off)

        return cft, on_of

    except ArithmeticError:
        print("Problem whilst computing the trigger")
Example #8
 def test_classic_sta_lta_c_python(self):
     """
     Test case for ctypes version of recursive_sta_lta
     """
     nsta, nlta = 5, 10
     c1 = classic_sta_lta(self.data, nsta, nlta)
     c2 = classic_sta_lta_py(self.data, nsta, nlta)
     self.assertTrue(np.allclose(c1, c2, rtol=1e-10))
     ref = np.array([0.38012302, 0.37704431, 0.47674533, 0.67992292])
     self.assertTrue(np.allclose(ref, c2[99:103]))
Example #9
def postfgen_false_vector(dataset, fbs, dt=2, hwin=150, fb_hzone=500, vlen=10):
    iprint = 0
    feature_matrix = np.zeros((len(dataset), (9 * (vlen + 1)) + 1))
    for trcid, trc in enumerate(dataset):
        fb_true = fbs[trcid]
        # Draw a random "false" first break near, but not equal to, the
        # true one (and not negative).
        fb = fb_true
        while fb == fb_true or fb < 0:
            fb = int(np.random.uniform(fb_true - fb_hzone,
                                       fb_true + fb_hzone, 1)[0])
        trc_norm = trc / np.amax(np.abs(trc))
        # Every feature is sampled on the (vlen + 1)-sample window starting
        # at the false first break.
        idx = int(fb / dt)
        window = slice(idx, idx + vlen + 1)
        trc_entropy = entropy(trc_norm, 50)
        trc_fdm = fdm(trc_norm, 50, np.arange(1, 4), 15)
        trc_slta = trigger.classic_sta_lta(trc_norm, 2, 100)
        trc_fq_win_sum = fq_win_sum(trc_norm, hwin, dt)
        ftrc = np.concatenate([
            trc_norm[window],
            norm(np.gradient(np.abs(trc_norm)))[window],
            norm(trc_entropy)[window],
            norm(np.gradient(trc_entropy))[window],
            norm(trc_fdm)[window],
            norm(np.gradient(trc_fdm))[window],
            norm(trc_slta)[window],
            norm(trc_fq_win_sum)[window],
            norm(np.gradient(trc_fq_win_sum))[window],
            [0],  # class label: window starts at a false first break
        ])
        feature_matrix[trcid, :] = ftrc
        if trcid == iprint:
            print('Feature vector for trace No.', trcid, 'completed')
            iprint += 100
    return feature_matrix
Example #10
def stalta(data,
           nsta=3,
           nlta=10,
           threson=1.08,
           thresoff=1.05,
           offset=40,
           **kwargs):
    """
    Utilises :func:`~obspy.signal.trigger.classic_sta_lta` to remove outliers

    :type data: :class:`~numpy.ndarray`
    :param data: x-y data in Nx2 array, shape (N, 2)
    :type nsta: int
    :param nsta: Length of short time average window in samples
    :type nlta: int
    :param nlta: Length of long time average window in samples
    :type threson: float
    :param threson: Value above which trigger (of characteristic function)
                    is activated (higher threshold)
    :type thresoff: float
    :param thresoff: Value below which trigger (of characteristic function)
                     is deactivated (lower threshold)
    :type offset: int
    :param offset: in samples, how many additional samples are removed before
                   on trigger and after off trigger
    :rtype: :class:`~numpy.ndarray`
    :return: mask array containing bool values
    """
    orig_N = data.shape[0]

    nanbools = np.isnan(data[:, 1])
    nanindices = np.where(nanbools)[0]
    data = data[~nanbools]

    cft = classic_sta_lta(data[:, 1], nsta, nlta)
    trigger_onoff = trigger_onset(cft, threson, thresoff)

    def inside_to(x_):
        for to in trigger_onoff:
            if to[0] - offset <= x_ <= to[1] + offset:
                return False
        return True

    filter_ = np.vectorize(inside_to)

    xi = np.where(filter_(np.arange(data.shape[0])))[0]

    for i in nanindices:
        xi[i <= xi] += 1

    return _get_mask(True, orig_N, xi, nanindices=nanindices)
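The helper above depends on a module-level _get_mask, so as a standalone illustration of the same idea (mask out samples inside any trigger window, padded by offset), a sketch might look like:

import numpy as np
from obspy.signal.trigger import classic_sta_lta, trigger_onset

y = np.random.randn(500)
y[200:220] += 8.0  # synthetic outlier burst

cft = classic_sta_lta(y, 3, 10)
mask = np.ones(y.size, dtype=bool)
for on, off in trigger_onset(cft, 1.08, 1.05):
    mask[max(on - 40, 0):off + 40 + 1] = False  # drop triggered span +/- offset

print(y[mask].size, "samples kept of", y.size)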
Example #11
    def __init__(self, dataset_path, savepath):
        super(DatasetBelgica, self).__init__()

        self.dataset_path = dataset_path
        self.savepath = savepath
        self.fs = 10
        self.n_traces = 7000

        # Noise dataset, no preprocessing needed
        print(f"Reading dataset from path: {self.dataset_path}")
        self.traces = sio.loadmat(self.dataset_path)["Data_2D"]

        self.noise_traces = np.empty((0, 6000))

        # window lengths in seconds
        sta_t = 3
        lta_t = 25

        # window lengths in samples
        sta_n = sta_t * self.fs * 10
        lta_n = lta_t * self.fs * 10

        copied = 0

        for trace in self.traces:
            trace = trace.reshape(-1, 6000)

            for tr in trace:
                tr = detrend(tr)
                tr = tr - np.mean(tr)
                tr /= np.amax(tr)

                cft = classic_sta_lta(tr, sta_n, lta_n)

                if np.amax(cft) < 2:
                    self.noise_traces = np.vstack([self.noise_traces, tr])
                    copied += 1

                if not (copied % 100):
                    print(f"copied: {copied}")

                if copied == self.n_traces:
                    break

            if copied == self.n_traces:
                break

        print(f"Saving npy format dataset in {self.savepath}")
        if not os.path.exists(f'{self.savepath}/Belgica.npy'):
            self.save_dataset(self.noise_traces, self.savepath, 'Belgica')
Example #12
def update_cft(prev_val, selected=None):
    print(ticker_alg.value)
    if ticker_alg.value == 'Classic STA/LTA':
        from obspy.signal.trigger import classic_sta_lta, trigger_onset
        on = trigger_slider.value[1]
        off = trigger_slider.value[0]
        cft = classic_sta_lta(st[0].data, 
                              int(stalta_slider.value[0] * st[0].stats.sampling_rate),
                              int(stalta_slider.value[1] * st[0].stats.sampling_rate))
        on_off = np.array(trigger_onset(cft, on, off))

        source_stalta.data = dict(times=st[0].times(), cft=cft)
        source_triggers.data = dict(ontimes=st[0].times()[on_off[:,0]], y=np.zeros(on_off[:,0].shape))
        #source_triggers.data = dict(offtimes=st[0].times()[on_off[:,1]], y=np.zeros(on_off[:,1].shape))

        sta_on.location = on
        sta_off.location = off
        
    elif ticker_alg.value == 'Recursive STA/LTA':
        from obspy.signal.trigger import recursive_sta_lta, trigger_onset
        on = trigger_slider.value[1]
        off = trigger_slider.value[0]
        cft = recursive_sta_lta(st[0].data,
                                int(stalta_slider.value[0] * st[0].stats.sampling_rate),
                                int(stalta_slider.value[1] * st[0].stats.sampling_rate))
        on_off = np.array(trigger_onset(cft, on, off))

        source_stalta.data = dict(times=st[0].times(), cft=cft)
        source_triggers.data = dict(ontimes=st[0].times()[on_off[:,0]], y=np.zeros(on_off[:,0].shape))
        #source_triggers.data = dict(offtimes=st[0].times()[on_off[:,1]], y=np.zeros(on_off[:,1].shape))

        sta_on.location = on
        sta_off.location = off

    elif ticker_alg.value == 'Carl-Sta-Trig [Not Yet Implemented]':
        from obspy.signal.trigger import carl_sta_trig, trigger_onset
        on = 3000
        off = -500
        cft = carl_sta_trig(st[0].data, int(5 * st[0].stats.sampling_rate), int(10 * st[0].stats.sampling_rate), 0.8, 0.8)
        on_off = np.array(trigger_onset(cft, on, off))

        source_stalta.data = dict(times=st[0].times(), cft=cft)
        source_triggers.data = dict(ontimes=st[0].times()[on_off[:,0]], y=np.zeros(on_off[:,0].shape))

        sta_on.location = on
        sta_off.location = off

    else:
        print(ticker_alg.value + ' is not yet implemented.')
        ticker_alg.value = prev_val
Example #13
    def _onset(self, stream, stw, ltw, log, timespan):
        """
        Generates an onset (characteristic) function. If there are multiple
        components, these are combined as the root-mean-square of the onset
        functions AFTER taking a log (if requested).

        Parameters
        ----------
        stream : `obspy.Stream` object
            Stream containing the pre-processed data from which to calculate
            the onset function.
        stw : int
            Number of samples in the short-term window.
        ltw : int
            Number of samples in the long-term window.
        log : bool
            Calculate log(onset) if True, otherwise calculate the raw onset.
        timespan : float or None
            If a timespan is provided it will be used to calculate the tapered
            window of data at the start and end of the onset function which
            should be disregarded.

        Returns
        -------
        onset : `numpy.ndarray` of float
            STA/LTA onset function.

        """

        if self.position == "centred":
            onsets = [sta_lta_centred(tr.data, stw, ltw) for tr in stream]
        elif self.position == "classic":
            onsets = [classic_sta_lta(tr.data, stw, ltw) for tr in stream]
        onsets = np.array(onsets)

        if timespan:
            onsets = self._trim_taper_pad(onsets, stw, ltw, timespan)

        np.clip(1 + onsets, 0.8, np.inf, onsets)
        if log:
            np.log(onsets, onsets)

        onset = np.sqrt(
            np.sum([onset**2 for onset in onsets], axis=0) / len(onsets))

        return onset
Example #14
def check_sta_lta(st,
                  sta_length=1.0,
                  lta_length=20.0,
                  threshold=5.0,
                  config=None):
    """
    Checks that the maximum STA/LTA ratio for AT LEAST ONE of the stream's
    traces is above a certain threshold.

    Args:
        st (obspy.core.stream.Stream):
            Stream of data.
        sta_length (float):
            Length of time window for STA (seconds).
        lta_length (float):
            Length of time window for LTA (seconds).
        threshold (float):
            Required maximum STA/LTA ratio to pass the test.
        config (dict):
            Configuration dictionary (or None). See get_config().

    Returns:
        Stream that has been checked for sta/lta requirements.
    """
    if not st.passed:
        return st

    for tr in st:
        sr = tr.stats.sampling_rate
        nlta = int(lta_length * sr) + 1
        if len(tr) >= nlta:
            sta_lta = classic_sta_lta(tr.data, int(sta_length * sr) + 1, nlta)
            if sta_lta.max() < threshold:
                tr.fail(
                    "Failed sta/lta check because threshold sta/lta is not exceeded."
                )
        else:
            tr.fail("Failed sta/lta check because record length is shorter "
                    "than lta length.")

    return st
Example #15
def getStaLtaValues(stations,
                    start,
                    duration,
                    sta_win,
                    lta_win,
                    sr,
                    sep='-',
                    quiet=True):
    """
    Creates lists of STA/LTA values for each station
    @params:
        stations    a list of stations created by
                    obspy.read
        start       the start time in seconds
        duration    the duration of times to consider
                    in seconds
        sta_win     the STA window size in seconds
        lta_win     the LTA window size in seconds
        sr          the sampling rate in Hz
        sep         the separator between station and
                    channel in the returned keys.
                    Default is '-'
        quiet       if 'True', no output to screen.
                    Default is 'True'
    @return:
        values      a dictionary of STA/LTA values
                    where the keys are the station
                    names
    """
    if not quiet:
        print("STA/LTA CALCULATIONS...")
    values = {}

    for sta in stations:
        values[sta.stats.station + sep + sta.stats.channel] = classic_sta_lta(
            sta.data[int(start * sr):int((start + duration) * sr)],
            int(sta_win * sr), int(lta_win * sr))
    if not quiet:
        print("done")
    return values
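A hedged usage sketch, assuming the function above is in scope; the station list comes from ObsPy's bundled demo stream and the window sizes are illustrative:

from obspy import read

stations = read()  # three demo traces at 100 Hz
vals = getStaLtaValues(stations, start=0, duration=20,
                       sta_win=1, lta_win=5, sr=100, quiet=False)
print(sorted(vals.keys()))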
Example #16
S._cleanup()
# I found this in the merge method, but I do not understand why it is here

# We skip this for now - we will do it later
#S.trim(ST-3600,ET+3600)

print(S)
print()
Z = S.select(component="Z")
print(Z)
df = Z[0].stats.sampling_rate
print("df=", df)
Z.filter('lowpass', freq=1.0, corners=2, zerophase=True)
cft = {}
for i in range(len(Z)):
    cft[i] = classic_sta_lta(Z[i].data, int(60 * df), int(180 * df))
#    plot_trigger(Z[i],cft[i], 1.5, 0.5)

# List of triggers as True/False values for each segment; recording rate - 100 Hz
trig3 = {}
#num=[]
for k in range(len(cft)):
    trig3[k] = []
    for i in range(len(cft[k])):
        if cft[k][i] > 1.5:
            trig3[k].append(True)
#           num.append(i)
        else:
            trig3[k].append(False)

# reduce the recording rate
Example #17
def main(args):
    times_csv = {
        "start_time": [],
        "end_time": [],
        "utc_start_timestamp": [],
        "utc_end_timestamp": [],
        "stname": []
    }
    stream_path = args.stream_path
    stream_files = [
        file for file in os.listdir(stream_path)
        if fnmatch.fnmatch(file, 'XX.MXI.2008207000000.mseed')
    ]

    for file in stream_files:
        stream_path1 = os.path.join(stream_path, file)
        print("+ Loading Stream {}".format(file))
        st1 = read(stream_path1)
        st1 = preprocess_stream(st1)
        tr1 = st1[0]
        tr2 = st1[1]
        tr3 = st1[2]
        # p_pick, s_pick = ar_pick(
        #     tr1, tr2, tr3, tr1.stats.sampling_rate, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
        # return 0
        threechannels = st1
        msg = "%s %s %s" % (tr1.stats.station, str(
            tr1.stats.starttime), str(tr1.stats.endtime))
        print(msg)
        delta1 = UTCDateTime(tr1.stats.starttime)
        delta2 = UTCDateTime(tr1.stats.endtime)
        t1 = math.ceil(delta1.timestamp)
        t2 = delta2.timestamp
        print(t1, t2)
        if args.save_mseed:
            mseed_dir = os.path.join(args.output, "mseed")
            if os.path.exists(mseed_dir):
                shutil.rmtree(mseed_dir)
            os.makedirs(mseed_dir)
            output_mseed = os.path.join(mseed_dir,
                                        tr1.stats.station + "start.mseed")
            st1.slice(tr1.stats.starttime, tr1.stats.starttime +
                      args.window_size).write(output_mseed, format="mseed")
        for t3 in range(int(t1), int(t2), args.window_step):
            t = UTCDateTime(t3)
            #print("Cut a slice at time:",t,tr1.stats.station,tr1.stats.sac.stlo, tr1.stats.sac.stla,str(file))
            lsplit3 = '{:0>2s}'.format(str(t.hour))
            lsplit4 = '{:0>2s}'.format(str(t.minute))
            lsplit5 = '{:0>2s}'.format(str(t.second))
            lsplit1 = '{:0>2s}'.format(str(t.year))
            lsplit2 = '{:0>2s}'.format(str(t.month))
            lsplit6 = '{:0>2s}'.format(str(t.day))
            t4 = t + args.window_size
            st1_ = tr1.slice(t, t4)
            st2_ = tr2.slice(t, t4)
            st3_ = tr3.slice(t, t4)
            df = st1_.stats.sampling_rate
            # Characteristic function and trigger onsets
            #cft = recursive_sta_lta(st1_.data, int(2 * df), int(20. * df))
            try:
                cft = classic_sta_lta(st1_.data, int(0.5 * df), int(60. * df))
                #cft1 = recursive_sta_lta(st2_.data, int(2 * df), int(20. * df))
                cft1 = classic_sta_lta(st2_.data, int(0.5 * df), int(60. * df))
                #cft2 = recursive_sta_lta(st3_.data, int(2 * df), int(20. * df))
                cft2 = classic_sta_lta(st3_.data, int(0.5 * df), int(60. * df))
            except Exception:
                continue
            #cft = classic_sta_lta(st_.data, int(2.5 * df), int(10. * df))
            on_of = trigger_onset(cft, 5, 1.2)
            on_of1 = trigger_onset(cft1, 5, 1.2)
            on_of2 = trigger_onset(cft2, 5, 1.2)
            if len(on_of) or len(on_of1) or len(on_of2):
                print(t, t4, tr1.stats.station)
                print(on_of, on_of1, on_of2)
                filename1 = tr1.stats.station + lsplit1 + \
                    lsplit2 + lsplit6 + lsplit3 + lsplit4 + lsplit5
                times_csv["start_time"].append(t)
                times_csv["utc_start_timestamp"].append(t.timestamp)
                times_csv["end_time"].append(t4)
                times_csv["utc_end_timestamp"].append(t4.timestamp)
                times_csv["stname"].append(filename1)

                # mseed_files = filename1+lsplit3+lsplit4+lsplit5 + '.mseed'
                # mseed_path = os.path.join(args.output, mseed_files)
                # threechannels.write(mseed_path, format="mseed")
                mseed_files = filename1 + '.mseed'
                print(mseed_files)
                if len(on_of):
                    min_on_of = min(on_of[:, 0])
                    max_on_of = max(on_of[:, 1])
                else:
                    min_on_of = args.window_size * df
                    max_on_of = 0
                if len(on_of1):
                    min_on_of1 = min(on_of1[:, 0])
                    max_on_of1 = max(on_of1[:, 1])
                else:
                    min_on_of1 = args.window_size * df
                    max_on_of1 = 0
                if len(on_of2):
                    min_on_of2 = min(on_of2[:, 0])
                    max_on_of2 = max(on_of2[:, 1])
                else:
                    min_on_of2 = args.window_size * df
                    max_on_of2 = 0
                minon_of = min(min_on_of, min_on_of1, min_on_of2)
                maxon_of = max(max_on_of, max_on_of1, max_on_of2)
                minon = int(minon_of / 100)  # samples -> seconds (assumes 100 Hz data)
                maxon = int(maxon_of / 100)
                print(minon_of, maxon_of)

                if args.save_mseed:
                    mseed_dir = os.path.join(args.output, "mseed")
                    output_mseed = os.path.join(mseed_dir, mseed_files)
                    threechannels.slice(t + minon,
                                        t + maxon).write(output_mseed,
                                                         format="mseed")
                if args.plot:
                    viz_dir = os.path.join(args.output, "viz")
                    if not os.path.exists(viz_dir):
                        os.makedirs(viz_dir)
                    threechannels.slice(
                        t + minon, t + maxon).plot(outfile=os.path.join(
                            viz_dir,
                            mseed_files.split(".mseed")[0] + '.png'))
        threechannels.clear()
        st1.clear()

    df = pd.DataFrame.from_dict(times_csv)
    print(df.shape[0])
    output_catalog = os.path.join(args.output, 'detection.csv')
    df.to_csv(output_catalog)
Example #18
             #print("Cut a slice at time:",t,tr1.stats.station,tr1.stats.sac.stlo, tr1.stats.sac.stla,str(file))
             lsplit3='{:0>2s}'.format(str(t.hour))
             lsplit4='{:0>2s}'.format(str(t.minute))
             lsplit5='{:0>2s}'.format(str(t.second))
             lsplit1 = '{:0>2s}'.format(str(t.year))
             lsplit2 = '{:0>2s}'.format(str(t.month))
             lsplit6 = '{:0>2s}'.format(str(t.day))
             t4=t + args.window_size
             st1_ = tr1.slice(t, t4)
             st2_ = tr2.slice(t, t4)
             st3_ = tr3.slice(t, t4)
             df = st1_.stats.sampling_rate
 # Characteristic function and trigger onsets
             #cft = recursive_sta_lta(st1_.data, int(2 * df), int(20. * df))
             try:
                 cft = classic_sta_lta(st1_.data, int(0.5* df), int(60. * df))
             #cft1 = recursive_sta_lta(st2_.data, int(2 * df), int(20. * df))
                 cft1 = classic_sta_lta(st2_.data, int(0.5 * df), int(60. * df))
             #cft2 = recursive_sta_lta(st3_.data, int(2 * df), int(20. * df))
                 cft2 = classic_sta_lta(st3_.data, int(0.5 * df), int(60. * df))
             except Exception:
                 continue
         #cft = classic_sta_lta(st_.data, int(2.5 * df), int(10. * df))
             on_of = trigger_onset(cft, 5, 1.2)
             on_of1 = trigger_onset(cft1, 5, 1.2)
             on_of2 = trigger_onset(cft2, 5, 1.2)
             if len(on_of) or len(on_of1) or len(on_of2):
                 print(t, t4, tr1.stats.station)
                 print(on_of, on_of1, on_of2)
                 filename1 = tr1.stats.station +lsplit1 + lsplit2 + lsplit6+ lsplit3 + lsplit4 + lsplit5
                 times_csv["start_time"].append(t)
Example #19
def stalta_pick(stream,
                stalen,
                ltalen,
                trig_on,
                trig_off,
                freqmin=False,
                freqmax=False,
                debug=0,
                show=False):
    """
    Basic sta/lta picker, suggest using alternative in obspy.
    Simple sta-lta (short-term average/long-term average) picker, using \
    obspy's stalta routine to generate the characteristic function.

    Currently very basic quick wrapper, there are many other (better) options \
    in obspy, found \
    `here <http://docs.obspy.org/packages/autogen/obspy.signal.trigger.html>`_.

    :type stream: obspy.Stream
    :param stream: The stream to pick on, can be any number of channels.
    :type stalen: float
    :param stalen: Length of the short-term average window in seconds.
    :type ltalen: float
    :param ltalen: Length of the long-term average window in seconds.
    :type trig_on: float
    :param trig_on: sta/lta ratio to trigger a detection/pick
    :type trig_off: float
    :param trig_off: sta/lta ratio to turn the trigger off - no further picks\
        will be made between exceeding trig_on until trig_off is reached.
    :type freqmin: float
    :param freqmin: Low-cut frequency in Hz for bandpass filter
    :type freqmax: float
    :param freqmax: High-cut frequency in Hz for bandpass filter
    :type debug: int
    :param debug: Debug output level from 0-5.
    :type show: bool
    :param show: Show picks on waveform.

    :returns: obspy.core.event.Event

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.picker import stalta_pick
    >>> st = read()
    >>> event = stalta_pick(st, stalen=0.2, ltalen=4, trig_on=10,
    ...             trig_off=1, freqmin=3.0, freqmax=20.0)
    >>> event.creation_info.author
    'EQcorrscan'
    """
    from obspy.signal.trigger import classic_sta_lta, trigger_onset
    from obspy.signal.trigger import plot_trigger
    from obspy import UTCDateTime
    from obspy.core.event import Event, Pick, WaveformStreamID
    from obspy.core.event import CreationInfo, Comment, Origin
    import eqcorrscan.utils.plotting as plotting

    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='stalta'))
    picks = []
    for tr in stream:
        # We are going to assume, for now, that if the pick is made on the
        # horizontal channel then it is an S, otherwise we will assume it is
        # a P-phase: obviously a bad assumption...
        if tr.stats.channel[-1] == 'Z':
            phase = 'P'
        else:
            phase = 'S'
        if freqmin and freqmax:
            tr.detrend('simple')
            tr.filter('bandpass',
                      freqmin=freqmin,
                      freqmax=freqmax,
                      corners=3,
                      zerophase=True)
        df = tr.stats.sampling_rate
        cft = classic_sta_lta(tr.data, int(stalen * df), int(ltalen * df))
        if debug > 3:
            plot_trigger(tr, cft, trig_on, trig_off)
        triggers = trigger_onset(cft, trig_on, trig_off)
        for trigger in triggers:
            on = tr.stats.starttime + (trigger[0] / df)
            # off = tr.stats.starttime + (trigger[1] / df)
            wav_id = WaveformStreamID(station_code=tr.stats.station,
                                      channel_code=tr.stats.channel,
                                      network_code=tr.stats.network)
            pick = Pick(waveform_id=wav_id, phase_hint=phase, time=on)
            if debug > 2:
                print('Pick made:')
                print(pick)
            picks.append(pick)
    # QC picks
    del pick
    pick_stations = list(set([pick.waveform_id.station_code
                              for pick in picks]))
    for pick_station in pick_stations:
        station_picks = [
            pick for pick in picks
            if pick.waveform_id.station_code == pick_station
        ]
        # If the P-pick is after the S-picks, remove it.
        p_time = [
            pick.time for pick in station_picks if pick.phase_hint == 'P'
        ]
        s_time = [
            pick.time for pick in station_picks if pick.phase_hint == 'S'
        ]
        if p_time and s_time and min(p_time) > min(s_time):
            p_pick = [pick for pick in station_picks if pick.phase_hint == 'P']
            for pick in p_pick:
                print('P pick after S pick, removing P pick')
                picks.remove(pick)
    if show:
        plotting.pretty_template_plot(stream,
                                      picks=picks,
                                      title='Autopicks',
                                      size=(8, 9))
    event.picks = picks
    event.origins[0].time = min([pick.time for pick in event.picks]) - 1
    event.origins[0].latitude = float('nan')
    event.origins[0].longitude = float('nan')
    # Set arbitrary origin time
    return event
Example #20
    #fig = plt.figure(1, figsize=(8, 4))
    fig.clf()
    ax = fig.add_subplot(211)
    ax.plot(t, tr.data, 'black')
    ax2 = fig.add_subplot(212, sharex=ax)
    ax2.plot(t, cft, 'black')
    onof = np.array(trigger_onset(cft, thr1, thr2))
    i, j = ax.get_ylim()
    try:
        ax.vlines(onof[:, 0] / df, i, j, color='red', lw=2)
        ax.vlines(onof[:, 1] / df, i, j, color='blue', lw=2)
    except IndexError:
        pass
    ax2.axhline(thr1, color='red', lw=1, ls='--')
    ax2.axhline(thr2, color='blue', lw=1, ls='--')
    fig.canvas.draw()
    plt.show()


def plot_threaded(tr, cft, thr1, thr2):
    thread = threading.Thread(target=plot_trigger, args=(tr, cft, thr1, thr2))
    thread.start()


st = read("data/ev0_6.a04.gse2")[0]
#st=read("data/ev0_6.a04.gse2") is a stream object
#st=read("data/ev0_6.a04.gse2")[0] is a trace object
df = st.stats.sampling_rate

cft = classic_sta_lta(st.data, int(5 * df), int(10 * df))
plot_trigger(st, cft, 1.5, 0.5)
Example #21
def analyze_coda(trace,
                 fm=6,
                 cycle=10,
                 noise_level=16,
                 Lw=50,
                 noise_duration=5,
                 subwdw_length=5,
                 subwdw_length_rec=2.5):
    """
        Return the correlation coefficient of the coda part of the signal: the onset of the coda
        is selected as the maximum amplitude time and the coda duration is Lw.

        :param trace: an obspy.core.Trace object
        :return: a list of tuples of the form:
        (slope_start_time, slope, intercept, R, pvalue, stderr)
        where slope_start_time is an obspy UTCDateTime object. For the other values, see:
        http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.linregress.html
        for details
        :rtype: see return field
    """
    st = trace
    try:
        st.data = bandpass(st.data,
                           freqmin=4,
                           freqmax=8,
                           df=st.stats.sampling_rate,
                           corners=2)
    except ValueError:
        return None
    if (st.stats.npts * st.stats.delta) > 100:
        st.detrend('demean')  # bring the signal back down to zero mean
        energy = st.data * st.data
        t = st.times()
        st_smooth, t_smooth = mysmooth(energy, t, fm, cycle, st.stats.delta)
        imax = st_smooth.index(max(st_smooth))
        new_dt = round(t_smooth[1] - t_smooth[0], 2)
        sec = int(noise_duration //
                  new_dt)  # take 10 seconds at the start of the signal
        noise = st_smooth[0:sec]  # take 5 seconds for the noise average
        # df=st.stats.sampling_rate
        # df = 1/new_dt

        # values I chose = 2 and 5 (in samples)
        cft = classic_sta_lta(noise, nsta=2, nlta=5)
        stalta = np.where(cft > 3)[0]  # value I chose = 1.5
        # if signal is actually detected inside the noise window: no good
        if len(stalta) > 0:
            return None  # we cannot define a good noise average
        else:
            noisedata = noise
        # ---- end of the noise average definition ----------------------------------------
        # ##### coda duration = from the envelope maximum ------> signal/noise ratio < 4 #######
        j = 0
        start = imax
        end_ = start + int(
            subwdw_length // new_dt)  # take a 5 s sliding window
        # rec_window = new_dt/2.  # 50% overlap
        n_rec = int(subwdw_length_rec //
                    new_dt)  # number of overlap points: we choose 2.5 s
        ratio = []
        while j < len(st_smooth[imax:imax + int(Lw // new_dt)]):
            ratio.append(np.mean(st_smooth[start:end_]) / np.mean(noisedata))
            j = j + n_rec
            start = start + n_rec
            end_ = start + int(subwdw_length // new_dt)
        # where, within the 80 s coda window, the signal exceeds the noise level
        indok = np.where(np.array(ratio) > noise_level)[0]
        ret_vals = None
        if len(indok) > 0:
            doublons = list(group(indok))
            if (len(doublons) == 1) and (doublons[0][-1] == len(ratio)-1) or (doublons[0][0] == 0) \
                    and (doublons[0][-1] == len(ratio)-1):
                # this means that a coda (or at least a ratio > 4) is detected
                # and we choose a length of at least 20 seconds
                coda = st_smooth[imax:imax +
                                 int(Lw // new_dt)]  # smoothed data

                # tcoda = t_smooth[imax:imax+int(Lw/new_dt)]

                # raw=st.data[imax:imax+int(Lw/new_dt)]  # raw data

                # test the coda to check we really have a "slope":
                # we play with the correlation coefficient

                # tr is the coda trace
                coda = np.log10(
                    coda
                )  # work with the log of the coda to get a slope
                n_pts = len(coda)  # number of points in the coda
                # window=5
                # rec=2.5

                # number of points in the 5 second window
                wdw_npts = int(subwdw_length // new_dt)
                # number of points in the overlap window:
                wdw_rec = int(subwdw_length_rec // new_dt)
                # maximum bound to reach when building 5 second windows:
                n_max = int(n_pts // wdw_npts)
                start = 0
                end = wdw_npts

                means = []
                x_means = []
                k = 0
                while end < n_max * wdw_npts:
                    means.append(np.mean(coda[start:end]))
                    x_means.append(k)
                    k = k + 1
                    start = start + wdw_rec
                    end = end + wdw_rec
                slope, intercept, R, pvalue, stderr = scipy.stats.linregress(
                    x_means, means)  # @UndefinedVariable
                start_time = st.stats.starttime + t_smooth[imax]
                ret_vals = (start_time, slope, intercept, R, pvalue, stderr)

        return ret_vals
Example #22
import obspy
import numpy as np
import matplotlib.pyplot as plt
from obspy.signal.trigger import classic_sta_lta, plot_trigger

on  = 1.5
off = 1.0

trace = obspy.read("TAU.IG.BHZ..D.2012.082.203455.00023000.sac")[0]
df    = trace.stats.sampling_rate

cft = classic_sta_lta(trace.data, int(5. * df), int(10. * df))

cft  = cft - on
cfts = np.sign(cft)
cftd = np.diff(cfts)

ind_on = np.where(cftd == 2)

plt.plot(trace.times(),trace.data)


for k in ind_on[0]:
    print(k, trace.times()[k])
    plt.axvline(trace.times()[k], color='r')

plt.savefig('deletme.png')
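The sign/diff trick above only finds upward crossings of the on threshold; obspy's trigger_onset returns both on and off indices directly. A sketch of the equivalent call, reusing the trace, df, on and off values defined above (on the unshifted characteristic function):

from obspy.signal.trigger import trigger_onset

cft_raw = classic_sta_lta(trace.data, int(5. * df), int(10. * df))
for on_idx, off_idx in trigger_onset(cft_raw, on, off):
    print(trace.times()[on_idx], trace.times()[off_idx])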

Example #23
st[0].plot()

st[0].copy().taper(0.05).filter("bandpass",
                                freqmin=freq_bounds[0],
                                freqmax=freq_bounds[1]).plot()

# + {"deletable": true, "editable": true}

tr = st[0].filter("bandpass", freqmin=freq_bounds[0], freqmax=freq_bounds[1])
sps = int(tr.stats.sampling_rate)

onthresh = 2.0
offthresh = 0.5
shortwin = 5  # sec
longwin = 20  # sec
cft = trg.classic_sta_lta(tr.data, shortwin * sps, longwin * sps)
trg.plot_trigger(tr, cft, onthresh, offthresh)

# + {"deletable": true, "editable": true, "cell_type": "markdown"}
# In multiple studies (Qamar, 1988; O'Neel, 2007; Bartholomaus, 2015; Kohler 2016), the duration of calving icequakes  has been shown to be an important predictor of iceberg size and/or calving flux.  Below, calculate and record the cumulative duration of calving events on randomly selected days during the Yahtse seismic experiment.  I recommend choosing some fixed amount of time on the first of each month.  Choose the longest fixed amount of time that you can bear to wait for the data to be retrieved and the analysis to run.  I recommend trying 12 hrs on the first of each month.

# + {"deletable": true, "editable": true}
trigs = trg.trigger_onset(cft, onthresh,
                          offthresh)  # the array of trigger on and off indices

event_times = np.array(starttime) + trigs[:, 0] / sps  # Detection "on" times

# del durs # remove the durs variable, so that old time runs don't contaminate new runs.
durs = np.diff(trigs, axis=1) / sps  # s  duration of events
durs = durs[:, 0]
Example #24
    def _compute_features(self, arr, window=False):
        if window:
            result = np.zeros_like(self.result_template_window)
        else:
            result = np.zeros_like(self.result_template)
        i = 0
        if self.minimum:
            result[i] = np.min(arr)
            i += 1
        if self.maximum:
            result[i] = np.max(arr)
            i += 1
        if self.mean:
            result[i] = np.mean(arr)
            i += 1
        if self.median:
            result[i] = np.median(arr)
            i += 1
        if self.std:
            result[i] = np.std(arr)
            i += 1
        if self.abs_min:
            result[i] = np.min(np.abs(arr))
            i += 1
        if self.abs_max:
            result[i] = np.max(np.abs(arr))
            i += 1
        if self.abs_mean:
            result[i] = np.mean(np.abs(arr))
            i += 1
        if self.abs_median:
            result[i] = np.median(np.abs(arr))
            i += 1
        if self.abs_std:
            result[i] = np.std(np.abs(arr))
            i += 1
        if self.mean_abs_delta:
            result[i] = np.mean(np.diff(arr))
            i += 1
        if self.mean_rel_delta:
            result[i] = np.mean(np.nonzero((np.diff(arr) / arr[:-1]))[0])
            i += 1
        if self.max_to_min:
            result[i] = np.max(arr) / np.abs(np.min(arr))
            i += 1
        if self.abs_trend:
            idx = np.array(range(len(arr)))
            lr = LinearRegression()
            lr.fit(idx.reshape(-1, 1), np.abs(arr))
            result[i] = lr.coef_[0]
            i += 1
        if self.mad:  # mean absolute deviation
            result[i] = np.mean(np.abs(arr - np.mean(arr)))
            i += 1
        if self.skew:
            result[i] = stats.skew(arr)
            i += 1
        if self.abs_skew:
            result[i] = stats.skew(np.abs(arr))
            i += 1
        if self.kurtosis:  # measure of tailedness
            result[i] = stats.kurtosis(arr)
            i += 1
        if self.abs_kurtosis:  # measure of tailedness
            result[i] = stats.kurtosis(np.abs(arr))
            i += 1
        if self.hilbert:  # abs mean in hilbert tranformed space
            result[i] = np.mean(np.abs(signal.hilbert(arr)))
            i += 1
        if self.hann:  # mean in hann window
            result[i] = np.mean(
                signal.convolve(arr, signal.windows.hann(150), mode='same') /
                np.sum(signal.windows.hann(150)))
            i += 1
        if self.corr_length:
            result[i] = self._autocorr_length(
                pd.Series(arr).reset_index(drop=True))
            i += 1
        if self.quantiles is not None:
            result[i:i + len(self.quantiles)] = np.quantile(arr,
                                                            q=self.quantiles)
            i += len(self.quantiles)
        if self.abs_quantiles is not None:
            result[i:i + len(self.abs_quantiles)] = np.quantile(
                np.abs(arr), q=self.abs_quantiles)
            i += len(self.abs_quantiles)
        if self.count_abs_big is not None:
            result[i:i + len(self.count_abs_big)] = np.array(
                [len(arr[np.abs(arr) > q]) for q in self.count_abs_big])
            i += len(self.count_abs_big)
        if self.stalta:
            if window:
                result[i:i + len(self.stalta_window)] = np.array([
                    np.mean(classic_sta_lta(arr, q[0], q[1]))
                    for q in self.stalta_window
                ])
                i += len(self.stalta_window)
            else:
                result[i:i + len(self.stalta)] = np.array([
                    np.mean(classic_sta_lta(arr, q[0], q[1]))
                    for q in self.stalta
                ])
                i += len(self.stalta)
        if self.exp_mov_ave:
            if window:
                result[i:i + len(self.exp_mov_ave_window)] = np.array([
                    np.mean(pd.Series.ewm(pd.Series(arr), span=q).mean())
                    for q in self.exp_mov_ave_window
                ])
                i += len(self.exp_mov_ave_window)
            else:
                result[i:i + len(self.exp_mov_ave)] = np.array([
                    np.mean(pd.Series.ewm(pd.Series(arr), span=q).mean())
                    for q in self.exp_mov_ave
                ])
                i += len(self.exp_mov_ave)

        return result
Example #25
def analyze_coda(trace, fm=6, cycle=10, noise_level=16, Lw=50, noise_duration=5, subwdw_length=5,
                 subwdw_length_rec=2.5):
    """
        Return the correlation coefficient of the coda part of the signal: the onset of the coda is selected as the maximum amplitude time and the coda duration is Lw.


        NOTE: this function accepts also streams objects (see @stream_compliant decorator in
        stream2segments.mseeds)

        :param trace: an obspy.core.Trace object
        :return: a list of tuples of the form:
        (slope_start_time, slope, intercept, R, pvalue, stderr)
        where slope_start_time is an obspy UTCDateTime object. For the other values, see:
        http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.linregress.html
        for details
        :rtype: see return field
    """
    st = trace
    try:
        st.data = bandpass(st.data, freqmin=4, freqmax=8, df=st.stats.sampling_rate, corners=2)
    except ValueError:
        return None
    if (st.stats.npts*st.stats.delta) > 100:
        st.detrend('demean')  # bring the signal back down to zero mean
        energy = st.data * st.data
        t = st.times()
        st_smooth, t_smooth = mysmooth(energy, t, fm, cycle, st.stats.delta)
        imax = st_smooth.index(max(st_smooth))
        new_dt = round(t_smooth[1]-t_smooth[0], 2)
        sec = int(noise_duration/new_dt)  # take 10 seconds at the start of the signal
        noise = st_smooth[0:sec]  # take 5 seconds for the noise average
        # df=st.stats.sampling_rate
        # df = 1/new_dt

        # values I chose = 2 and 5 (in samples)
        cft = classic_sta_lta(noise, nsta=2, nlta=5)
        stalta = np.where(cft > 3)[0]  # value I chose = 1.5
        # if signal is actually detected inside the noise window: no good
        if len(stalta) > 0:
            return None  # we cannot define a good noise average
        else:
            noisedata = noise
        # ---- end of the noise average definition ----------------------------------------
        # ##### coda duration = from the envelope maximum ------> signal/noise ratio < 4 #######
        j = 0
        start = imax
        end_ = start+int(subwdw_length/new_dt)  # take a 5 s sliding window
        # rec_window = new_dt/2.  # 50% overlap
        n_rec = int(subwdw_length_rec/new_dt)  # number of overlap points: we choose 2.5 s
        ratio = []
        while j < len(st_smooth[imax:imax+int(Lw/new_dt)]):
            ratio.append(np.mean(st_smooth[start:end_]) / np.mean(noisedata))
            j = j+n_rec
            start = start+n_rec
            end_ = start+int(subwdw_length/new_dt)
        # where, within the 80 s coda window, the signal exceeds the noise level
        indok = np.where(np.array(ratio) > noise_level)[0]
        ret_vals = None
        if len(indok) > 0:
            doublons = list(group(indok))
            if (len(doublons) == 1) and (doublons[0][-1] == len(ratio)-1) or (doublons[0][0] == 0) \
                    and (doublons[0][-1] == len(ratio)-1):
                # this means that a coda (or at least a ratio > 4) is detected
                # and we choose a length of at least 20 seconds
                coda = st_smooth[imax:imax+int(Lw/new_dt)]  # smoothed data

                # tcoda = t_smooth[imax:imax+int(Lw/new_dt)]

                # raw=st.data[imax:imax+int(Lw/new_dt)]  # raw data

                # test the coda to check we really have a "slope":
                # we play with the correlation coefficient

                # tr is the coda trace
                coda = np.log10(coda)  # work with the log of the coda to get a slope
                n_pts = len(coda)  # number of points in the coda
                # window=5
                # rec=2.5

                # number of points in the 5 second window
                wdw_npts = int(subwdw_length / new_dt)
                # number of points in the overlap window:
                wdw_rec = int(subwdw_length_rec / new_dt)
                # maximum bound to reach when building 5 second windows:
                n_max = np.floor(n_pts / wdw_npts)
                start = 0
                end = wdw_npts

                means = []
                x_means = []
                k = 0
                while end < n_max * wdw_npts:
                    means.append(np.mean(coda[start: end]))
                    x_means.append(k)
                    k = k + 1
                    start = start + wdw_rec
                    end = end + wdw_rec
                slope, intercept, R, pvalue, stderr = sc.stats.linregress(x_means, means)
                start_time = st.stats.starttime + t_smooth[imax]
                ret_vals = (start_time, slope, intercept, R, pvalue, stderr)

        return ret_vals
Example #26
 def trc_fgen_postfb(self, trc, dt, hwin=150, vlen=51):
     """ Constructs a feature matrix for the post-FB model """
     output = np.zeros((len(trc), (11 * vlen) + 1))
     trc_norm = trc / np.amax(np.abs(trc))
     trc_entropy = self.entropy(trc_norm, 50)
     trc_fdm = self.fdm(trc_norm, 50, np.arange(1, 4), 15)
     trc_slta = trigger.classic_sta_lta(trc_norm, 2, 100)
     trc_fq_win_sum = self.fq_win_sum(trc_norm, hwin, dt)
     hwin2 = 50
     trc_kurtosis_skew = self.kurtosis_skewness(trc_norm, hwin2)
     for i in range(len(trc) - vlen - 1):
         # Candidate first break at sample i; every feature is sampled on
         # the vlen-sample window starting at the candidate.
         window = slice(i, i + vlen)
         output[i, :] = np.concatenate([
             trc_norm[window],
             self.norm(np.gradient(np.abs(trc_norm)))[window],
             self.norm(trc_entropy)[window],
             self.norm(np.gradient(trc_entropy))[window],
             self.norm(trc_fdm)[window],
             self.norm(np.gradient(trc_fdm))[window],
             self.norm(trc_slta)[window],
             self.norm(trc_fq_win_sum)[window],
             self.norm(np.gradient(trc_fq_win_sum))[window],
             self.norm(trc_kurtosis_skew[0])[window],
             self.norm(trc_kurtosis_skew[1])[window],
             [1],  # class label
         ])
     return output
Example #27
def trc_fgen_prefb(self, trc, dt, nspad=200, hwin=150, vlen=51):
    """ Constructs feature matrices for the pre-FB model """
    output = np.zeros((len(trc), (11 * vlen) + 1))
    # Prepend nspad samples of low-amplitude noise so that a trailing window
    # of length vlen exists even at the first sample.
    pad = np.random.rand(nspad) / 100
    trc_norm = trc / np.amax(np.abs(trc))
    trc_norm_padded = np.hstack((pad, trc_norm))
    trc_entropy = self.entropy(trc_norm_padded, 50)
    trc_fdm = self.fdm(trc_norm_padded, 50, np.arange(1, 4), 15)
    trc_slta = trigger.classic_sta_lta(trc_norm_padded, 2, 100)
    trc_fq_win_sum = self.fq_win_sum(trc_norm_padded, hwin, dt)
    hwin2 = 50
    trc_kurtosis_skew = self.kurtosis_skewness(trc_norm_padded, hwin2)
    channels = [
        trc_norm_padded,
        self.norm(np.gradient(np.abs(trc_norm_padded))),
        self.norm(trc_entropy),
        self.norm(np.gradient(trc_entropy)),
        self.norm(trc_fdm),
        self.norm(np.gradient(trc_fdm)),
        self.norm(trc_slta),
        self.norm(trc_fq_win_sum),
        self.norm(np.gradient(trc_fq_win_sum)),
        self.norm(trc_kurtosis_skew[0]),
        self.norm(trc_kurtosis_skew[1]),
    ]
    for i in range(len(trc)):
        # Window of length vlen ending at the padded sample for fb = i * dt;
        # np.int(nspad + fb / dt) is simply nspad + i, and np.int has been
        # removed from NumPy, so use plain ints.
        end = nspad + i + 1
        start = end - vlen
        ftrc = np.concatenate([ch[start:end] for ch in channels] + [[1.0]])
        output[i, :] = ftrc
    return output
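# Self-contained illustration (assumed values) of why the pre-FB variant pads
# the trace: prepending nspad low-amplitude noise samples lets a trailing
# window of length vlen be extracted even at sample i = 0.
import numpy as np

nspad, vlen = 200, 51
trc = np.random.randn(1000)
padded = np.hstack((np.random.rand(nspad) / 100, trc / np.abs(trc).max()))
window0 = padded[nspad - vlen + 1:nspad + 1]  # window ending at sample 0
assert len(window0) == vlen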
Example #29
# Trigger settings
ntriggersta = 2  # number of channels with a coincident detection required for a trigger

#####################

# Initialize data download and cft calculation

from bokeh.models import RangeSlider, Select
from obspy import UTCDateTime
from obspy.signal.trigger import classic_sta_lta, coincidence_trigger

# utils, settings and STALTA_ALGORITHMS are defined elsewhere in this module.
st = utils.grab_data(settings['server'], settings['port'], settings['scnl'],
                     UTCDateTime(settings['startstop'][0]),
                     UTCDateTime(settings['startstop'][1]))

cft = classic_sta_lta(st[0].data, int(3 * st[0].stats.sampling_rate),
                      int(8 * st[0].stats.sampling_rate))

# set up widgets

ticker_alg = Select(value=list(STALTA_ALGORITHMS.keys())[0],
                    options=list(STALTA_ALGORITHMS.keys()))
stalta_slider = RangeSlider(start=1,
                            end=15,
                            value=(3, 8),
                            step=1,
                            title="STA/LTA (seconds)")
trigger_slider = RangeSlider(start=0,
                             end=4,
                             value=(0.8, 1.4),
                             step=0.1,
                             title="Trigger On/Off")
Example #30
# Imports assumed by this snippet (it is excerpted from
# eqcorrscan.utils.picker):
import logging

from obspy import UTCDateTime
from obspy.core.event import (
    Comment, CreationInfo, Event, Origin, Pick, WaveformStreamID)
from obspy.signal.trigger import classic_sta_lta, trigger_onset

from eqcorrscan.utils import plotting

Logger = logging.getLogger(__name__)


def stalta_pick(stream,
                stalen,
                ltalen,
                trig_on,
                trig_off,
                freqmin=False,
                freqmax=False,
                show=False):
    """
    Basic sta/lta picker, suggest using alternative in obspy.

    Simple sta/lta (short-term average/long-term average) picker, using
    obspy's :func:`obspy.signal.trigger.classic_sta_lta` routine to generate
    the characteristic function.

    Currently very basic quick wrapper, there are many other (better) options
    in obspy in the :mod:`obspy.signal.trigger` module.

    :type stream: obspy.core.stream.Stream
    :param stream: The stream to pick on, can be any number of channels.
    :type stalen: float
    :param stalen: Length of the short-term average window in seconds.
    :type ltalen: float
    :param ltalen: Length of the long-term average window in seconds.
    :type trig_on: float
    :param trig_on: sta/lta ratio to trigger a detection/pick
    :type trig_off: float
    :param trig_off: sta/lta ratio to turn the trigger off - no further picks\
        will be made between exceeding trig_on until trig_off is reached.
    :type freqmin: float
    :param freqmin: Low-cut frequency in Hz for bandpass filter
    :type freqmax: float
    :param freqmax: High-cut frequency in Hz for bandpass filter
    :type show: bool
    :param show: Show picks on waveform.

    :returns: :class:`obspy.core.event.event.Event`

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.picker import stalta_pick
    >>> st = read()
    >>> event = stalta_pick(st, stalen=0.2, ltalen=4, trig_on=10,
    ...             trig_off=1, freqmin=3.0, freqmax=20.0)
    >>> print(event.creation_info.author)
    EQcorrscan

    .. warning::
        This function is not designed for accurate picking, rather it can give
        a first idea of whether picks may be possible.  Proceed with caution.
    """
    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='stalta'))
    picks = []
    for tr in stream:
        # We are going to assume, for now, that if the pick is made on the
        # horizontal channel then it is an S, otherwise we will assume it is
        # a P-phase: obviously a bad assumption...
        if tr.stats.channel[-1] == 'Z':
            phase = 'P'
        else:
            phase = 'S'
        if freqmin and freqmax:
            tr.detrend('simple')
            tr.filter('bandpass',
                      freqmin=freqmin,
                      freqmax=freqmax,
                      corners=3,
                      zerophase=True)
        df = tr.stats.sampling_rate
        cft = classic_sta_lta(tr.data, int(stalen * df), int(ltalen * df))
        triggers = trigger_onset(cft, trig_on, trig_off)
        for trigger in triggers:
            on = tr.stats.starttime + (trigger[0] / df)
            # off = tr.stats.starttime + (trigger[1] / df)
            wav_id = WaveformStreamID(station_code=tr.stats.station,
                                      channel_code=tr.stats.channel,
                                      network_code=tr.stats.network)
            p = Pick(waveform_id=wav_id, phase_hint=phase, time=on)
            Logger.info('Pick made: {0}'.format(p))
            picks.append(p)
    # QC picks
    pick_stations = list(set([pick.waveform_id.station_code
                              for pick in picks]))
    for pick_station in pick_stations:
        station_picks = [
            pick for pick in picks
            if pick.waveform_id.station_code == pick_station
        ]
        # If the P-pick is after the S-pick, remove it.  Comparing the raw
        # lists is unsafe (a station with only P picks compares greater than
        # an empty S list), so compare the earliest pick of each phase.
        p_time = [
            pick.time for pick in station_picks if pick.phase_hint == 'P'
        ]
        s_time = [
            pick.time for pick in station_picks if pick.phase_hint == 'S'
        ]
        if p_time and s_time and min(p_time) > min(s_time):
            p_pick = [pick for pick in station_picks if pick.phase_hint == 'P']
            for pick in p_pick:
                Logger.info('P pick after S pick, removing P pick')
                picks.remove(pick)
    event.picks = picks
    if show:
        plotting.pretty_template_plot(stream,
                                      event=event,
                                      title='Autopicks',
                                      size=(8, 9))
    if len(event.picks) > 0:
        # Set an arbitrary origin time just before the earliest pick.
        event.origins[0].time = min([pick.time for pick in event.picks]) - 1
        # event.origins[0].latitude = float('nan')
        # event.origins[0].longitude = float('nan')
    return event
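# Minimal illustration (not part of the original module) of the
# trig_on/trig_off hysteresis described in the docstring above: the trigger
# turns on when the characteristic function exceeds trig_on and cannot fire
# again until it has dropped below trig_off.
import numpy as np
from obspy.signal.trigger import trigger_onset

cft_demo = np.array([0.5, 2.0, 5.0, 3.0, 1.5, 0.5, 0.5])
print(trigger_onset(cft_demo, 4.0, 1.0))  # a single (on, off) index pair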
Example #31
def stalta_pick(stream, stalen, ltalen, trig_on, trig_off, freqmin=False,
                freqmax=False, debug=0, show=False):
    """
    Basic sta/lta picker, suggest using alternative in obspy.
    Simple sta-lta (short-term average/long-term average) picker, using \
    obspy's stalta routine to generate the characteristic function.

    Currently very basic quick wrapper, there are many other (better) options \
    in obspy, found \
    `here <http://docs.obspy.org/packages/autogen/obspy.signal.trigger.html>`_.

    :type stream: obspy.Stream
    :param stream: The stream to pick on, can be any number of channels.
    :type stalen: float
    :param stalen: Length of the short-term average window in seconds.
    :type ltalen: float
    :param ltalen: Length of the long-term average window in seconds.
    :type trig_on: float
    :param trig_on: sta/lta ratio to trigger a detection/pick
    :type trig_off: float
    :param trig_off: sta/lta ratio to turn the trigger off - no further picks\
        will be made between exceeding trig_on until trig_off is reached.
    :type freqmin: float
    :param freqmin: Low-cut frequency in Hz for bandpass filter
    :type freqmax: float
    :param freqmax: High-cut frequency in Hz for bandpass filter
    :type debug: int
    :param debug: Debug output level from 0-5.
    :type show: bool
    :param show: Show picks on waveform.

    :returns: obspy.core.event.Event

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.picker import stalta_pick
    >>> st = read()
    >>> event = stalta_pick(st, stalen=0.2, ltalen=4, trig_on=10,
    ...             trig_off=1, freqmin=3.0, freqmax=20.0)
    >>> event.creation_info.author
    'EQcorrscan'
    """
    from obspy.signal.trigger import classic_sta_lta, trigger_onset
    from obspy.signal.trigger import plot_trigger
    from obspy import UTCDateTime
    from obspy.core.event import Event, Pick, WaveformStreamID
    from obspy.core.event import CreationInfo, Comment, Origin
    import eqcorrscan.utils.plotting as plotting

    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='stalta'))
    picks = []
    for tr in stream:
        # We are going to assume, for now, that if the pick is made on the
        # horizontal channel then it is an S, otherwise we will assume it is
        # a P-phase: obviously a bad assumption...
        if tr.stats.channel[-1] == 'Z':
            phase = 'P'
        else:
            phase = 'S'
        if freqmin and freqmax:
            tr.detrend('simple')
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax,
                      corners=3, zerophase=True)
        df = tr.stats.sampling_rate
        cft = classic_sta_lta(tr.data, int(stalen * df), int(ltalen * df))
        if debug > 3:
            plot_trigger(tr, cft, trig_on, trig_off)
        triggers = trigger_onset(cft, trig_on, trig_off)
        for trigger in triggers:
            on = tr.stats.starttime + (trigger[0] / df)
            # off = tr.stats.starttime + (trigger[1] / df)
            wav_id = WaveformStreamID(station_code=tr.stats.station,
                                      channel_code=tr.stats.channel,
                                      network_code=tr.stats.network)
            pick = Pick(waveform_id=wav_id, phase_hint=phase, time=on)
            if debug > 2:
                print('Pick made:')
                print(pick)
            picks.append(pick)
    # QC picks
    pick_stations = list(set([pick.waveform_id.station_code
                              for pick in picks]))
    for pick_station in pick_stations:
        station_picks = [pick for pick in picks if
                         pick.waveform_id.station_code == pick_station]
        # If the P-pick is after the S-pick, remove it.  Compare the earliest
        # pick of each phase; comparing the raw lists misbehaves when one
        # phase has no picks.
        p_time = [pick.time for pick in station_picks
                  if pick.phase_hint == 'P']
        s_time = [pick.time for pick in station_picks
                  if pick.phase_hint == 'S']
        if p_time and s_time and min(p_time) > min(s_time):
            p_pick = [pick for pick in station_picks
                      if pick.phase_hint == 'P']
            for pick in p_pick:
                print('P pick after S pick, removing P pick')
                picks.remove(pick)
    if show:
        plotting.pretty_template_plot(stream, picks=picks, title='Autopicks',
                                      size=(8, 9))
    event.picks = picks
    if len(event.picks) > 0:
        # Set an arbitrary origin time just before the earliest pick
        # (taking the minimum of an empty pick list would raise).
        event.origins[0].time = min([pick.time for pick in event.picks]) - 1
        event.origins[0].latitude = float('nan')
        event.origins[0].longitude = float('nan')
    return event
Example #32
# Imports and the function header are missing from this excerpt; the header
# below is reconstructed from how plot_trigger is called further down.
import threading

import matplotlib.pyplot as plt
import numpy as np
from obspy import read
from obspy.signal.trigger import classic_sta_lta, trigger_onset


def plot_trigger(tr, cft, thr1, thr2):
    df = tr.stats.sampling_rate
    t = np.arange(tr.stats.npts) / df
    fig = plt.figure(1)
    # fig = plt.figure(1, figsize=(8, 4))
    fig.clf()
    ax = fig.add_subplot(211)
    ax.plot(t, tr.data, 'black')
    ax2 = fig.add_subplot(212, sharex=ax)
    ax2.plot(t, cft, 'black')
    # triggerOnset was renamed to trigger_onset in modern obspy
    onof = np.array(trigger_onset(cft, thr1, thr2))
    i, j = ax.get_ylim()
    try:
        ax.vlines(onof[:, 0] / df, i, j, color='red', lw=2)
        ax.vlines(onof[:, 1] / df, i, j, color='blue', lw=2)
    except IndexError:
        pass
    ax2.axhline(thr1, color='red', lw=1, ls='--')
    ax2.axhline(thr2, color='blue', lw=1, ls='--')
    fig.canvas.draw()
    plt.show()

def plot_threaded(tr, cft, thr1, thr2):
    thread = threading.Thread(target=plot_trigger, args=(tr, cft, thr1, thr2))
    thread.start()


st = read("data/ev0_6.a04.gse2")[0]
# read("data/ev0_6.a04.gse2") is a stream object
# read("data/ev0_6.a04.gse2")[0] is a trace object
df = st.stats.sampling_rate

cft = classic_sta_lta(st.data, int(5 * df), int(10 * df))
plot_trigger(st, cft, 1.5, 0.5)
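# The same two-panel figure is also available from obspy's built-in helper
# (imported under an alias so it does not shadow the local function above):
from obspy.signal.trigger import plot_trigger as obspy_plot_trigger

obspy_plot_trigger(st, cft, 1.5, 0.5)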
Example #33
    def trigger_events(self,
                       ftmin,
                       ftmax,
                       nsta,
                       nlta,
                       thrsh1,
                       thrsh2,
                       num_trig,
                       plot=True):
        """
        Function to trigger events from continuous data.
        :param ftmin: lower corner frequency used for filtering prior to triggering
        :param ftmax: upper corner frequency used for filtering prior to triggering
        :param nsta: number of samples short term average
        :param nlta: number of samples long term average
        :param thrsh1: nsta/nlta threshold to trigger
        :param thrsh2: nsta/nlta threshold to stop trigger
        :param num_trig: number of stations required to eventually trigger an event
        :param plot: if true, gives an overview of triggered events
        """
        print("TRIGGER EVENTS ...")
        print("STA: %i samples" % nsta)
        print("LTA: %i samples" % nlta)
        print("-------------------------------------------------")

        # trigger on continuous data from all array stations
        trig_times = {}
        dict_env_maxs = {}
        # read data
        for stn in self.stnlist:
            try:
                st = read(self.path2mseed + "%s/%s.D/4D.%s..%s.D.2016.%i" %
                          (stn, self.chn, stn, self.chn, self.jday))
            except Exception:
                print("%s: no data!!!" % stn)
                print("skip this day!")
                sys.exit(1)
            # adjust sampling rate
            for tr in st:
                if tr.stats.sampling_rate != self.fs:
                    tr.resample(self.fs)
            if self.decfact > 1:
                st.decimate(self.decfact)
            # trim and filter
            t1 = UTCDateTime(2016, 1, 1)
            t1.julday = self.jday
            t2 = t1 + 24. * 60. * 60.
            st.trim(t1, t2)
            dt = st[0].stats.delta
            st.filter("bandpass", freqmin=ftmin, freqmax=ftmax, zerophase=True)

            # for station, trigger all traces and count number of events
            n_ev = 0
            for tr in st:
                cft = classic_sta_lta(tr.data, nsta, nlta)
                on_off = trigger_onset(cft, thrsh1, thrsh2)
                if plot:
                    plot_trigger(tr, cft, thrsh1, thrsh2)
                if len(on_off) > 0:
                    n_ev += on_off.shape[0]

            # trigger again and convert trigger time to timestring format
            ons = np.zeros(n_ev)
            offs = np.zeros(n_ev)
            ind = 0
            for i, tr in enumerate(st):
                cft = classic_sta_lta(tr.data, nsta, nlta)
                trig = trigger_onset(cft, thrsh1, thrsh2)
                if len(trig) > 0:
                    ons[ind:ind + trig.shape[0]] = trig.astype(
                        float)[:, 0] * dt + tr.stats.starttime.timestamp
                    offs[ind:ind + trig.shape[0]] = trig.astype(
                        float)[:, 1] * dt + tr.stats.starttime.timestamp
                    ind += trig.shape[0]

            # remove events re-triggered within tt = 3 s of a previous event
            tt = 3.
            ind_del = []
            for i in range(len(ons) - 1):
                print(ons[i + 1] - ons[i])
                if (ons[i + 1] - ons[i]) < tt:
                    ind_del.append(i + 1)
            ons = np.delete(ons, ind_del)
            offs = np.delete(offs, ind_del)
            print("%s: %i events detected!" % (stn, len(ons)))

            # recalculate on/off times (defined as the interval where the envelope is greater than 0.2 times its max)
            n = 0.4
            p = 0.6
            env_maxs = np.zeros(len(ons))
            for i in range(len(ons)):
                ts = UTCDateTime(ons[i]) - n
                te = UTCDateTime(offs[i]) + p
                st_ = read(self.path2mseed + "%s/%s.D/4D.%s..%s.D.2016.%i" %
                           (stn, self.chn, stn, self.chn, self.jday),
                           starttime=ts - 5,
                           endtime=te + 5)
                # adjust sampling rate
                for tr in st_:
                    if tr.stats.sampling_rate != self.fs:
                        tr.resample(self.fs)
                st_.filter("bandpass",
                           freqmin=self.fmin,
                           freqmax=self.fmax,
                           zerophase=True)
                st_.trim(ts, te)
                # calc envelope
                env = obspy.signal.filter.envelope(st_[0].data)
                env_max = np.max(env)
                env_maxs[i] = ts.timestamp + np.argmax(env) * dt
                ind_greater = np.where(env >= 0.2 * env_max)[0]
                ediff = np.ediff1d(ind_greater)
                ind_greater = np.split(ind_greater,
                                       np.where(ediff != 1)[0] + 1)

                for j in range(len(ind_greater)):
                    if np.argmax(env) in ind_greater[j]:
                        ons[i] = ts.timestamp + ind_greater[j][0] * dt
                        offs[i] = ts.timestamp + ind_greater[j][-1] * dt
                        #plt.plot(st_[0].data)
                        #plt.plot(env, "k:")
                        #plt.plot(ind_greater[j], env[ind_greater[j]], "r")
                        #plt.show()
            on_off_ = np.zeros((len(ons), 2))
            on_off_[:, 0] = ons
            on_off_[:, 1] = offs
            trig_times[stn] = on_off_
            dict_env_maxs[stn] = env_maxs

        # take only events which are triggered on at least num_trig stations (take travel time for very slow velocities
        #  across the array radius as limit)
        tt = self.r / 1000.
        self.stnlist.sort()
        cstn = self.stnlist[4]

        on_off_ = []
        env_max_PMx5 = dict_env_maxs[cstn]
        for i in range(len(env_max_PMx5)):
            env_PMx5 = env_max_PMx5[i]
            count = 0
            for j in range(len(self.stnlist) - 1):
                stn = self.stnlist[j]
                ind = np.where(((dict_env_maxs[stn] - tt) < env_PMx5)
                               & (env_PMx5 < (dict_env_maxs[stn] + tt)))[0]
                if len(ind) == 1:
                    count += 1
            if count >= (num_trig - 1):
                add = np.array(trig_times[cstn][i])
                on_off_.append(add)
        print("--> %i events detected on >= %i stations!" %
              (len(on_off_), num_trig))
        print("------------------------------------------")

        # convert to numpy array
        on_off = np.zeros((len(on_off_), 2))
        for i in range(len(on_off_)):
            on_off[i, :] = on_off_[i]

        return on_off
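# Hypothetical usage (detector stands for an instance of the surrounding
# class, initialised elsewhere with the station list, channel, julian day,
# sampling rate, decimation factor and array radius it references):
on_off = detector.trigger_events(ftmin=1.0, ftmax=10.0, nsta=80, nlta=800,
                                 thrsh1=3.0, thrsh2=1.0, num_trig=4,
                                 plot=False)
print("%i coincident events" % on_off.shape[0])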
Example #34
#!/usr/bin/env python3
from obspy.signal.trigger import classic_sta_lta
import numpy as np

if __name__ == "__main__":
    # One sample per line; the file ends with a trailing newline, so the
    # final (empty) entry is dropped.
    with open('gse2.txt', 'r') as ifl:
        cdat = ifl.read().split("\n")
    trace = np.array(cdat[:-1], dtype=float)
    df = 200
    print(5 * df, 10 * df)
    # trace is already an ndarray; the original passed trace.data, which on
    # an ndarray is the raw memory buffer, not the array itself.
    cft = classic_sta_lta(trace, int(5 * df), int(10 * df))
    with open('classicSTALTA_ref.txt', 'w') as ofl:
        for value in cft:
            ofl.write("%.10f\n" % value)
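# Companion sketch (not part of the original script): re-run the computation
# and check it against the stored reference values.
import numpy as np
from obspy.signal.trigger import classic_sta_lta

trace = np.loadtxt('gse2.txt')
ref = np.loadtxt('classicSTALTA_ref.txt')
assert np.allclose(classic_sta_lta(trace, 1000, 2000), ref, atol=1e-9)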
Example #35
# Imports assumed by this snippet (sigproc is a project-local module that
# provides peakdet):
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.signal as spsignal
from obspy import UTCDateTime
from obspy.signal import filter as filte
from obspy.signal.trigger import classic_sta_lta

import sigproc


def getEnergies(signal, energies_df, before, stalta_lowerlim=1.0,
                stalta_upperlim=2.0, taper_length=0.0, smoothwin=301,
                smoothorder=3, check_calcs=False):
    """
    Calculates the pseudo-energy for an input signal by determining the event
    start and end times using a combination of the signal envelope and the
    STA/LTA of the signal, and computing the area below the envelope between
    these two times.
    INPUTS
    signal (obspy Trace) - seismic signal trace from an obspy stream object,
        cropped around the event
    energies_df (pandas DataFrame) - table of pseudo-energies returned from previous 
        events in list, if evaluating landslide sequences, or empty table if 
        evaluating only one event
    before (float) - seconds before trigger time that signal was cropped; used
        to get initial event time
    stalta_lowerlim (float) - optional; lower STA/LTA threshold for locating 
        signal minima, used for determining event start and end times
    stalta_upperlim (float) - optional; upper STA/LTA threshold for locating 
        event and determining event start and end times
    taper_length (float) - optional; length in seconds of taper at start of signal
    smoothwin (int) - optional; window length in samples for Savgol smoothing 
        of envelopes
    smoothorder (int) - optional; polynomial order for Savgol smoothing
    check_calcs (boolean) - optional; setting to True plots the signal with the 
        calculated start and end times, as well as all minima, maxima, and event
        time that were used to perform the calculations
    OUTPUT
    energies_df (pandas DataFrame) - updated table of pseudo-energies for all 
        events evaluated in sequence
    """
    # Calculate envelope of signal
    signal_envelope = signal.copy()
    signal_envelope.data = filte.envelope(signal_envelope.data)
    signal_envelope.data = spsignal.savgol_filter(signal_envelope.data, 
                                                  smoothwin, smoothorder)

    # Calculate STA/LTA of signal
    samp_rate = signal_envelope.stats.sampling_rate # Hz
    sta_lta = classic_sta_lta(signal_envelope.data, int(3 * samp_rate),
                              int(10 * samp_rate))
    sta_lta = spsignal.savgol_filter(sta_lta, smoothwin, smoothorder)
    
    # Get index of event time in signal
    eventtime_index = int(before*samp_rate)
    event_time = signal_envelope.stats.starttime + before

    # Find peaks in sta_lta
    peaks, _ = sigproc.peakdet(sta_lta, stalta_lowerlim)
    peak_inds = [int(peak[0]) for peak in peaks]

    # Find minima in sta_lta (a smaller delta also picks up shallow minima)
    _, mins = sigproc.peakdet(sta_lta, 0.05)
    min_inds = [int(mini[0]) for mini in mins]

    # Only keep peaks that are within desired range
    # (Not in taper, not at end of signal, not before last detected event's end time) 
    lower_search_indx = int(taper_length*samp_rate)
    higher_search_indx = int(2*before*samp_rate)
    
    # Convert search limits to UTCDateTimes
    lower_search_time = signal_envelope.stats.starttime + lower_search_indx/samp_rate
    higher_search_time = signal_envelope.stats.starttime + higher_search_indx/samp_rate
    
    # Get end time of last event
    if len(energies_df) == 0:
        previous_end_time = signal_envelope.stats.starttime
    else:
        previous_end_time = UTCDateTime(energies_df['Trigger times'].values[-1]) - \
                            before + energies_df['End times'].values[-1]/samp_rate 
    
    # If end time of last event outside of search window, return dataframe
    # without updating                        
    if previous_end_time > higher_search_time:
        return energies_df
    
    # If end time of last event is within search window but greater than
    # lower search limit, match lower search limit to previous end time
    if previous_end_time > lower_search_time and previous_end_time < higher_search_time:
        time_diff = previous_end_time - lower_search_time
        lower_search_indx += int(time_diff*samp_rate)
        higher_search_indx += int(time_diff*samp_rate)
        
    # If triggering event time is before lower limit of search window, 
    # return dataframe without updating
    if lower_search_indx >= (eventtime_index - int(samp_rate)):
        return energies_df
        
    # Find peaks in search window, look for peaks greater than upper sta/lta limit
    peak_inds1 = []
    peak_inds2 = []
    for peak in peak_inds:
        if peak > lower_search_indx and peak < higher_search_indx:
            peak_inds1.append(peak)  
            if sta_lta[peak] >= stalta_upperlim:
                peak_inds2.append(peak)                
    
    # Find largest peak in range
    signal_peak = lower_search_indx +\
                  np.argmax(signal_envelope[lower_search_indx:higher_search_indx])
                  
    # If there are peaks above sta/lta threshold and within search window, 
    # find peak that is closest to max amplitude in search window
    if len(peak_inds2) > 0:
        stalta_peak = peak_inds2[0]
        for peak in peak_inds2:
            min_diff = abs(signal_peak - stalta_peak)
            if abs(signal_peak - peak) < min_diff:
                stalta_peak = peak 
                
    # If there are peaks within search window but not above threshold,
    # find peak that is closest to max amplitude in search window
    elif len(peak_inds1) > 0:
        stalta_peak = peak_inds1[0]
        for peak in peak_inds1:
            # Compare indices, mirroring the branch above (the original
            # mistakenly compared an index against an envelope amplitude).
            min_diff = abs(signal_peak - stalta_peak)
            if abs(signal_peak - peak) < min_diff:
                stalta_peak = peak
                
    # If there aren't any peaks within search window, pick the biggest peak
    else:
        # np.argmax returns a position within peak_inds, so map it back to
        # an index into sta_lta.
        stalta_peak = peak_inds[np.argmax(sta_lta[peak_inds])]
        lower_search_indx = 0
    
    # Find minima with sta_lta < min threshold
    min_inds1 = []
    for mini in min_inds:
        if sta_lta[mini] <= stalta_lowerlim:
            min_inds1.append(mini)
            
    # Locate min above lower sta/lta threshold that is immediately before 
    # first peak above upper sta/lta threshold for event start time
    # If no peaks above upper sta/lta threshold, select smallest min immediately
    # preceding stalta_peak
    # If neither conditions are met, choose lower_search_indx
    if len(min_inds1) > 0:
        peak_list = []
        if len(peak_inds2) > 0:
            for peak in peak_inds2:
                if peak < stalta_peak:
                    peak_list.append(peak)
        if len(peak_list) == 0:
            peak_list = [stalta_peak]

        compare_peak = max(peak_list)
        
        # Sort a copy in descending order (the original aliased min_inds1,
        # so sorting also reordered it in place).
        reverse_min_inds1 = sorted(min_inds1, reverse=True)

        checkmin = compare_peak
        for mini in reverse_min_inds1:
            if mini < checkmin:
                checkmin = mini
            else:
                starttime = checkmin
                break
        else:
            # No break: every minimum precedes the peak, so take the earliest
            # one found (starttime was left unbound in this case originally).
            starttime = checkmin
    else:
        starttime = lower_search_indx
                
    # Find min where signal returns to starttime amplitude to get end time.
    # If more peaks exceed the upper sta/lta threshold after stalta_peak and
    # before higher_search_indx, endtime must fall after the last such peak.
    # If no endtime is found, choose the last index of the signal.
    last_peak = stalta_peak
    for peak in peak_inds2:
        if peak > stalta_peak:
            last_peak = peak
    endtime = len(signal_envelope) - 1
    noise_level = 0.10  # fraction of the signal amplitude treated as noise
    for i in range(last_peak+1, len(signal_envelope) - 1):
        if signal_envelope[i] <= (1+noise_level)*signal_envelope[starttime]:
            endtime = i
            break
     
    # Integrate envelope between signal start and end times to get pseudo-energy
    try:
        # Velocity envelope integrated over time, so units are m
        integral = sum(signal_envelope[starttime:endtime]) / samp_rate
        signal_length = (endtime - starttime) / samp_rate  # units are s
        max_amp = max(signal_envelope[starttime:endtime])  # units are m/s
    except (ValueError, IndexError):
        integral = -999.
        signal_length = -999.
        max_amp = -999.
        
    # Visualize signal to verify start and end time calculations
    if check_calcs:
        plt.figure()
        plt.subplot(2,1,1)
        plt.plot(signal)
        plt.plot(signal_envelope, 'r', label = 'Envelope')
        line1 = starttime
        line2 = endtime
        plt.axvline(line1, color='k', linestyle='--', label = 'Start and end times')
        plt.axvline(line2, color='k', linestyle='-.')
        plt.title(str(event_time) + '\nSignal with Envelope')
        plt.ylabel('Amplitude (m/s)')
        plt.xlabel('Signal Index')
        plt.legend()
        plt.subplot(2,1,2)
        plt.plot(sta_lta)
        plt.axvline(line1, color='k', linestyle='--')
        plt.axvline(line2, color='k', linestyle='-.')
        plt.axvline(lower_search_indx, color='g', linestyle=':', 
                    label = 'Search window')
        plt.axvline(higher_search_indx, color='g', linestyle=':')
        plt.axvline(eventtime_index, color='b', linestyle=':', 
                    label = 'Event triggering time')
        plt.plot(peak_inds2, sta_lta[peak_inds2], 'r*')
        plt.plot(peak_inds1, sta_lta[peak_inds1], 'r.')
        plt.plot(min_inds, sta_lta[min_inds], 'k.')
        plt.plot(min_inds1, sta_lta[min_inds1], 'k*')
        plt.axvline(stalta_peak, color='r', label='STA/LTA peak')
        plt.title('STA/LTA')
        plt.ylabel('Amplitude')
        plt.xlabel('Signal Index')
        plt.subplots_adjust(hspace=0.5)
        plt.show()
    
    # Organize return info into dataframe
    col_names = ['Trigger times', 'Start times', 'End times', 'Peak times', 
                 'Integrals (m/s^2)', 'Max amplitudes (m/s)', 'Signal lengths (s)',
                 'Sampling rates']
    new_energies_df = pd.DataFrame({col_names[0]: [event_time],
                                    col_names[1]: [starttime],
                                    col_names[2]: [endtime],
                                    col_names[3]: [stalta_peak],
                                    col_names[4]: [integral],
                                    col_names[5]: [max_amp],
                                    col_names[6]: [signal_length],
                                    col_names[7]: [samp_rate]}, 
                                   columns = col_names)
    energies_df = pd.concat([energies_df, new_energies_df], ignore_index = True)
    
    return energies_df
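# Hypothetical usage (tr is an obspy Trace cropped around a single event; an
# empty DataFrame starts a fresh sequence):
energies = getEnergies(tr, pd.DataFrame(), before=30.0, check_calcs=True)
print(energies)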