Example No. 1
def get_multievent_sg(cum_trace, tmin, tmax, sg_params, multievent_thresholds):
    """
    Returns the tuple (or a list of tuples, if the first argument is a stream) of the
    values (score, UTCDateTime of arrival)
    where scores is: 0: no double event, 1: double event inside tmin_tmax,
        2: double event after tmax, 3: both double event previously defined are detected
    If score is 2 or 3, the second argument is the UTCDateTime denoting the occurrence of the
    first sample triggering the double event after tmax
    :param trace: the input obspy.core.Trace
    """
    tmin = utcdatetime(tmin)
    tmax = utcdatetime(tmax)

    # split traces between tmin and tmax and after tmax
    traces = [cum_trace.slice(tmin, tmax), cum_trace.slice(tmax, None)]

    # calculate second derivative and normalize:
    second_derivs = []
    max_ = np.nan
    for ttt in traces:
        sec_der = savitzky_golay(ttt.data, sg_params['wsize'],
                                 sg_params['order'], sg_params['deriv'])
        sec_der_abs = np.abs(sec_der)
        idx = np.nanargmax(sec_der_abs)
        # get max (global) for normalization:
        max_ = np.nanmax([max_, sec_der_abs[idx]])
        second_derivs.append(sec_der_abs)

    # normalize second derivatives:
    for der in second_derivs:
        der /= max_

    result = 0

    # case A: see if after tmax we exceed a threshold
    indices = np.where(
        second_derivs[1] >= multievent_thresholds['after_tmax_inpercent'])[0]
    if len(indices):
        result = 2

    # case B: see if inside [tmin, tmax] we exceed a threshold and, if so, check the duration
    deltatime = 0
    starttime = tmin
    endtime = None
    indices = np.where(
        second_derivs[0] >= multievent_thresholds['inside_tmin_tmax_inpercent']
    )[0]
    if len(indices) >= 2:
        idx0 = indices[0]
        starttime = timeof(traces[0], idx0)
        idx1 = indices[-1]
        endtime = timeof(traces[0], idx1)
        deltatime = endtime - starttime
        if deltatime >= multievent_thresholds['inside_tmin_tmax_insec']:
            result += 1

    return result, deltatime, starttime, endtime
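
A minimal usage sketch for the function above, assuming `savitzky_golay`, `utcdatetime` and `timeof` (project helpers) plus numpy as `np` are already in scope; the parameter values below are illustrative, not taken from the original project.

# hypothetical usage: `cum_trace` is assumed to be a cumulative ObsPy Trace and
# tmin/tmax UTCDateTime-convertible window bounds
sg_params = {'wsize': 31, 'order': 2, 'deriv': 2}       # Savitzky-Golay window, order, derivative
multievent_thresholds = {
    'after_tmax_inpercent': 0.1,        # normalized 2nd-derivative threshold after tmax
    'inside_tmin_tmax_inpercent': 0.1,  # threshold inside [tmin, tmax]
    'inside_tmin_tmax_insec': 10.0,     # minimum duration (s) to flag the inside event
}
score, deltatime, starttime, endtime = get_multievent_sg(
    cum_trace, tmin, tmax, sg_params, multievent_thresholds)
if score in (2, 3):
    print('double event detected after tmax')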
Example No. 2
def get_psd_values_df(segment, config):
    """
    Gets the PSD values in form of DataFrame. Does not check if
    the stream has gaps oiverlaps (assumes it has not).
    Checks in the config if the psd values should be calculated on sub windows
    of segment.stream() or not (parameter 'sn_windows')

    :param inventory: if None, uses the segment inventory.
    """
    traces_invs_params = get_traces_from_segment(segment, config)

    sn_wdw = config['sn_windows']
    wlen_sec = sn_wdw['signal_window']
    if wlen_sec:
        atime = utcdatetime(
            segment.arrival_time) + sn_wdw['arrival_time_shift']
        new_traces_invs_params = []
        for (tra, inv, params) in traces_invs_params:
            noi_wdw = [
                tra.slice(None, atime), inv, {
                    **params, WINDOWTYPE_COL: False
                }
            ]
            # window_type True: has signal, False: is noise
            sig_wdw = [
                tra.slice(atime, None), inv, {
                    **params, WINDOWTYPE_COL: True
                }
            ]
            # window_type True: has signal, False: is noise
            new_traces_invs_params.extend([noi_wdw, sig_wdw])
        traces_invs_params = new_traces_invs_params
    else:
        # simply set the window_type param, overwriting any value already present:
        for (tra, inv, params) in traces_invs_params:
            # window_type True: has signal, False: is noise
            params[WINDOWTYPE_COL] = True

    ret_dfs = []
    required_psd_periods = config['psd_periods']
    for tra, inv, params in traces_invs_params:

        # PSD values for the current window (noise or signal):
        required_psd_values = psd_values(
            required_psd_periods, tra,
            segment.inventory() if inv is None else inv)

        # collect the PSD values at the periods given in the config
        # into the output row (one dict per trace window):
        ret = OrderedDict()

        for period, psdval in zip(required_psd_periods, required_psd_values):
            ret['psd@%ssec' % str(period)] = float(psdval)

        ret = {
            **params,
            'length_sec': tra.stats.endtime - tra.stats.starttime,  # <- float
            **ret
        }

        # # Here we tried to programmatically label artifacts as outliers
        # # But we realised later that the lines below have no effect as
        # # they should be executed BEFORE the creation of `ret` above.
        # # We also realised that it is better to handle these artifacts later
        # # in a Jupyter notebook and put them in a specified data frame.
        # # So, all in all, let's comment them out:
        # if (required_psd_values[~np.isnan(required_psd_values)] <= -1000).all():
        #    params[HANDLABELLED_COL] = True
        #    params[OUTLIER_COL] = True

        ret_dfs.append(ret)

    return pd.DataFrame(ret_dfs)
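
A sketch of the `config` entries read by the function above, reconstructed from the keys it accesses; the concrete values are hypothetical, and `segment` is assumed to expose `stream()`, `inventory()` and `arrival_time` as used in the code.

# hypothetical config fragment (keys as accessed above, values illustrative):
config = {
    'sn_windows': {
        'signal_window': 60.0,       # window length in seconds; a falsy value disables the noise/signal split
        'arrival_time_shift': -1.0,  # seconds added to segment.arrival_time
    },
    'psd_periods': [0.1, 1.0, 10.0, 100.0],  # periods (s) at which the PSD is sampled
}
psd_df = get_psd_values_df(segment, config)  # one row per trace window, with 'psd@<period>sec' columns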
Example No. 3
def _main(segment, config, raw_trace, inventory_used):
    """
    called by main with supplied inventory_used, which MUST be the inventory used
    on the raw trace to obtain `segment.stream()[0]`
    """
    required_psd_periods = config['psd_periods']

    atime = utcdatetime(segment.arrival_time)

    # trace.slice calls trace.copy().trim:
    traces = [
        (raw_trace.slice(atime - 60, atime), 'n'),
        # (raw_trace.slice(atime-30, atime+30), 'ns'),
        (raw_trace.slice(atime, atime + 60), 's')
    ]

    ret_df = []

    for wdw_trace, window_type in traces:
        #         # cumulative of squares:
        #         cum_labels = [0.05, 0.95]
        #         cum_trace = cumsumsq(trace, normalize=True, copy=True)
        #         cum_times = timeswhere(cum_trace, *cum_labels)
        #
        #         # Calculate PGA and PGV
        #         # FIXME! THERE IS AN ERROR HERE: WE SHOULD INTEGRATE ONLY IF WE HAVE AN
        #         # ACCELEROMETER, SHOULDN'T WE?
        #         t_PGA, PGA = maxabs(trace, cum_times[0], cum_times[-1])
        #         trace_int = trace.copy().integrate()
        #         t_PGV, PGV = maxabs(trace_int, cum_times[0], cum_times[-1])
        #
        #         # CALCULATE SPECTRA (SIGNAL and NOISE)
        #         spectra = _sn_spectra(segment, config)
        #         normal_f0, normal_df, normal_spe = spectra['Signal']
        #         noise_f0, noise_df, noise_spe = spectra['Noise']  # @UnusedVariable
        #
        #         # AMPLITUDE (or POWER) SPECTRA VALUES and FREQUENCIES:
        #         required_freqs = config['freqs_interp']
        #         ampspec_freqs = normal_f0 + normal_df * np.arange(len(normal_spe))
        #         required_amplitudes = np.interp(np.log10(required_freqs),
        #                                         np.log10(ampspec_freqs),
        #                                         normal_spe) / segment.sample_rate
        #
        #         # SNR:
        #
        #         fcmin = mag2freq(magnitude)
        #         fcmax = config['preprocess']['bandpass_freq_max']  # used in bandpass_remresp
        #         spectrum_type = config['sn_spectra']['type']
        #         snr_ = snr(normal_spe, noise_spe, signals_form=spectrum_type,
        #                    fmin=fcmin, fmax=fcmax, delta_signal=normal_df, delta_noise=noise_df)

        # PSD values for the current window:
        # FIXME! DO I HAVE TO PASS THE PROCESSED TRACE (AS IT IS) or THE RAW ONE
        # (segment.stream(True)[0])?

        required_psd_values = psd_values(required_psd_periods, wdw_trace,
                                         inventory_used)

        # collect the PSD values at the periods given in the config
        # into the output row (one dict per window):
        ret = OrderedDict()

        for period, psdval in zip(required_psd_periods, required_psd_values):
            ret['psd@%ssec' % str(period)] = float(psdval)

        ret['outlier'] = False
        # ret['modified'] = ''
        ret['window_type'] = window_type
        ret['start_time'] = wdw_trace.stats.starttime.datetime
        ret['length_sec'] = wdw_trace.stats.endtime - wdw_trace.stats.starttime

        ret_df.append(ret)

    return pd.DataFrame(ret_df)
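
A hedged usage sketch for `_main`. As the docstring requires, `inventory_used` must be the inventory applied to the raw trace; the accessors below are assumptions based on the segment API already referenced in Example No. 2 and in the comments above.

# hypothetical call (accessors as referenced in the code/comments above):
raw_trace = segment.stream(True)[0]    # raw, unprocessed trace
inventory_used = segment.inventory()   # inventory used to process the trace
psd_df = _main(segment, config, raw_trace, inventory_used)
# -> two rows: noise window ('n') and signal window ('s'), each with 'psd@<period>sec' columns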