Code example #1
def test():
    from sito import read
    from obspy.signal.freqattributes import mper  #, welch
    #from mtspec import mtspec

    ms = read('/home/richter/Data/Parkfield/raw/PKD_1996_296.mseed')
    #ms.plotTrace()

    print(ms[0].stats)
    # -*- snip -*-
    data = ms[0].data
    data = data - np.mean(data)
    #data -= np.linspace(0,1,len(data))*(data[-1]-data[0])+data[0]

    N = len(data)
    df = 1. / (ms[0].stats.endtime - ms[0].stats.starttime)
    print(N // 2 * df)

    spec1 = mper(data, cosTaper(N, 0.05), nextpow2(N))[:N // 2]
    #spec2 =  welch(data, cosTaper(N, 0.05), nextpow2(N), len(data)/10, 0.2)[:N//2]
    spec1_d = oct_downsample(spec1, df, fac=1.3)
    freq1 = get_octfreqs(len(spec1_d), df, fac=1.3)

    ax = plot_psd(spec1, log=True)
    ax = plot_psd(spec1_d, freq1, log=True, ax=ax)
    plt.show()
Code example #2
File: psd.py Project: iceseismic/sito
def test():
    from sito import read
    from obspy.signal.freqattributes import mper  # , welch

    # from mtspec import mtspec

    ms = read("/home/richter/Data/Parkfield/raw/PKD_1996_296.mseed")
    # ms.plotTrace()

    print(ms[0].stats)
    # -*- snip -*-
    data = ms[0].data
    data = data - np.mean(data)
    # data -= np.linspace(0,1,len(data))*(data[-1]-data[0])+data[0]

    N = len(data)
    df = 1.0 / (ms[0].stats.endtime - ms[0].stats.starttime)
    print(N // 2 * df)

    spec1 = mper(data, cosTaper(N, 0.05), nextpow2(N))[: N // 2]
    # spec2 =  welch(data, cosTaper(N, 0.05), nextpow2(N), len(data)/10, 0.2)[:N//2]
    spec1_d = oct_downsample(spec1, df, fac=1.3)
    freq1 = get_octfreqs(len(spec1_d), df, fac=1.3)

    ax = plot_psd(spec1, log=True)
    ax = plot_psd(spec1_d, freq1, log=True, ax=ax)
    plt.show()
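For reference, the tapered-periodogram step that both examples above perform can be reproduced with NumPy alone; mper, oct_downsample and plot_psd come from obspy and sito and are not reproduced here, and the synthetic signal, sampling rate and taper fraction below are illustrative only (a sketch, not sito's implementation):

import numpy as np

def cos_taper(npts, p):
    # Hann-flank taper; p is the total tapered fraction, split over both
    # ends, mirroring the cosTaper(N, 0.05) call above.
    frac = int(npts * p / 2.0)
    taper = np.ones(npts)
    ramp = 0.5 * (1.0 - np.cos(np.pi * np.arange(frac) / frac))
    taper[:frac] = ramp
    taper[-frac:] = ramp[::-1]
    return taper

fs = 100.0                                    # assumed sampling rate [Hz]
t = np.arange(0.0, 60.0, 1.0 / fs)
data = np.sin(2 * np.pi * 5.0 * t) + 0.1 * np.random.randn(len(t))
data -= data.mean()                           # demean, as in the example

N = len(data)
spec = np.abs(np.fft.rfft(data * cos_taper(N, 0.05)))**2 / N  # periodogram
freq = np.fft.rfftfreq(N, d=1.0 / fs)         # frequencies up to fs/2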
Code example #3
def check_and_phase_shift(trace):
    # print trace
    taper_length = 20.0
    if trace.stats.npts < 4 * taper_length*trace.stats.sampling_rate:
        trace.data = np.zeros(trace.stats.npts)
        return trace

    dt = np.mod(trace.stats.starttime.datetime.microsecond*1.0e-6,
                trace.stats.delta)
    if (trace.stats.delta - dt) <= np.finfo(float).eps:
        dt = 0
    if dt != 0:
        if dt <= (trace.stats.delta / 2.):
            dt = -dt
#            direction = "left"
        else:
            dt = (trace.stats.delta - dt)
#            direction = "right"
        trace.detrend(type="demean")
        trace.detrend(type="simple")
        taper_1s = taper_length * float(trace.stats.sampling_rate) / trace.stats.npts
        cp = cosTaper(trace.stats.npts, taper_1s)
        trace.data *= cp

        n = int(2**nextpow2(len(trace.data)))
        FFTdata = scipy.fftpack.fft(trace.data, n=n)
        fftfreq = scipy.fftpack.fftfreq(n, d=trace.stats.delta)
        FFTdata = FFTdata * np.exp(1j * 2. * np.pi * fftfreq * dt)
        trace.data = np.real(scipy.fftpack.ifft(FFTdata, n=n)[:len(trace.data)])
        trace.stats.starttime += dt
        return trace
    else:
        return trace
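The core of check_and_phase_shift is the frequency-domain subsample shift: multiplying the spectrum by exp(2j*pi*f*dt) advances the samples by dt seconds, which the snippet compensates by moving starttime forward by dt. A minimal NumPy-only sketch of that one idea (the function name is illustrative):

import numpy as np

def phase_shift(data, dt, delta):
    # advance `data` (sample spacing `delta` seconds) by `dt` seconds
    spec = np.fft.rfft(data)
    freq = np.fft.rfftfreq(len(data), d=delta)
    return np.fft.irfft(spec * np.exp(2j * np.pi * freq * dt), n=len(data))

x = np.sin(np.linspace(0, 20 * np.pi, 1000))
y = phase_shift(x, dt=0.01, delta=0.01)   # ~np.roll(x, -1), up to edge wrap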
Code example #4
File: spectral_estimation.py Project: bmorg/obspy
def fft_taper(data):
    """
    Cosine taper, 10 percent at each end (like done by [McNamara2004]_).

    .. warning::
        Inplace operation, so data should be float.
    """
    data *= cosTaper(len(data), 0.2)
    return data
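A quick numeric check of the docstring: in obspy's taper convention the second argument is the total tapered fraction, so 0.2 does taper 10 percent at each end. Assuming a recent obspy, where the function lives at obspy.signal.invsim.cosine_taper:

import numpy as np
from obspy.signal.invsim import cosine_taper

w = cosine_taper(1000, 0.2)
print((w < 1.0).sum())   # ~200 of 1000 samples tapered, i.e. ~10 % per end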
Code example #5
def stream_taper(st):
    """
    Applies cosine taper to a stream (iterates over all available traces).

    :param st: Input stream, processed in place
    :type st: An obspy.stream object

    """
    for tr in st:
        try:
            mytaper = cosTaper(tr.stats.npts)
            t_tr = mytaper * (tr.data)
        except ValueError:
            logging.warning('Trace is too short for tapering - '
                            'multiplying by 0 instead')
            t_tr = 0.0 * (tr.data)
        tr.data = t_tr
    return st
Code example #6
File: OP_waveforms.py Project: amaggi/waveloc
def stream_taper(st):
    """
    Applies cosine taper to a stream (iterates over all available traces).

    :param st: Input stream, processed in place
    :type st: An obspy.stream object

    """
    for tr in st:
        try:
            mytaper = cosTaper(tr.stats.npts)
            t_tr = mytaper*(tr.data)
        except ValueError:
            logging.warning('Trace is too short for tapering - '
                            'multiplying by 0 instead')
            t_tr = 0.0*(tr.data)
        tr.data = t_tr
    return st
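A hedged usage sketch for stream_taper, assuming the function and its cosTaper/logging imports are in scope; obspy's read() with no argument loads a bundled example stream, so this runs without local data files:

from obspy import read

st = read()               # three short demo traces shipped with obspy
st = stream_taper(st)     # each trace now ramps smoothly to zero at the ends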
Code example #7
def convSTF(st, sigma=30.):
    """Convolve every trace in ``st`` with a Gaussian source time function
    of standard deviation ``sigma`` seconds (convolution done in the
    frequency domain)."""
    gauss = lambda t, s: 1. / (2. * np.pi * s**2.)**.5 \
                         * np.exp(-1 * (t**2) / (2 * (s**2)))

    df = st[0].stats.sampling_rate
    dt = 1. / df

    # Gaussian sampled over 20 sigma, centred at 10 sigma
    t = np.linspace(0., sigma * 20., int(sigma * 20 * df) + 1)
    stf = gauss(t - sigma * 10, sigma)
    nstf = len(stf)

    for tr in st:
        tr.data *= cosTaper(len(tr.data), p=0.05)
        nfft = util.nextpow2(max(nstf, tr.stats.npts)) * 2
        stff = np.fft.rfft(stf, n=nfft) * dt
        trf = np.fft.rfft(tr.data, n=nfft) * dt
        # remove the 10 sigma delay introduced by the centred STF
        shift = int(sigma * 10 * df)
        tr.data = np.fft.irfft(stff * trf)[shift:shift + len(tr.data)] * df

    return st
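The doubled nextpow2 padding above is there so the circular FFT convolution equals a linear convolution. A tiny NumPy check of that equivalence (sizes arbitrary):

import numpy as np

x = np.random.randn(256)
h = np.exp(-np.linspace(-3.0, 3.0, 64)**2)
nfft = 512    # >= len(x) + len(h) - 1, so no wrap-around occurs
y_fft = np.fft.irfft(np.fft.rfft(x, n=nfft) * np.fft.rfft(h, n=nfft),
                     n=nfft)[:len(x) + len(h) - 1]
assert np.allclose(y_fft, np.convolve(x, h))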
Code example #8
File: cross_correlation.py Project: gthompson/obspy
def xcorrPickCorrection(pick1, trace1, pick2, trace2, t_before, t_after,
                        cc_maxlag, filter=None, filter_options={}, plot=False,
                        filename=None):
    """
    Calculate the correction for the differential pick time determined by cross
    correlation of the waveforms in narrow windows around the pick times.
    For details on the fitting procedure refer to [Deichmann1992]_.

    The parameters depend on the epicentral distance and magnitude range. For
    small local earthquakes (Ml ~0-2, distance ~3-10 km) with consistent manual
    picks the following can be tried::

        t_before=0.05, t_after=0.2, cc_maxlag=0.10,
        filter="bandpass", filter_options={'filter_low': 1, 'filter_high': 20}

    The appropriate parameter sets can and should be determined/verified
    visually using the option `plot=True` on a representative set of picks.

    To get the corrected differential pick time calculate: ``((pick2 +
    pick2_corr) - pick1)``. To get a corrected differential travel time using
    origin times for both events calculate: ``((pick2 + pick2_corr - ot2) -
    (pick1 - ot1))``

    :type pick1: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param pick1: Time of pick for `trace1`.
    :type trace1: :class:`~obspy.core.trace.Trace`
    :param trace1: Waveform data for `pick1`. Add some time at front/back.
            The appropriate part of the trace is used automatically.
    :type pick2: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param pick2: Time of pick for `trace2`.
    :type trace2: :class:`~obspy.core.trace.Trace`
    :param trace2: Waveform data for `pick2`. Add some time at front/back.
            The appropriate part of the trace is used automatically.
    :type t_before: float
    :param t_before: Time to start cross correlation window before pick times
            in seconds.
    :type t_after: float
    :param t_after: Time to end cross correlation window after pick times in
            seconds.
    :type cc_maxlag: float
    :param cc_maxlag: Maximum lag time tested during cross correlation in
            seconds.
    :type filter: string
    :param filter: None for no filtering or name of filter type
            as passed on to :meth:`~obspy.core.trace.Trace.filter` if filter
            should be used. To avoid artifacts in filtering provide
            sufficiently long time series for `trace1` and `trace2`.
    :type filter_options: dict
    :param filter_options: Filter options that get passed on to
            :meth:`~obspy.core.trace.Trace.filter` if filtering is used.
    :type plot: bool
    :param plot: If ``True``, a plot window illustrating the alignment of the
            two traces and the cross correlation function around its maximum
            is shown.
    :type filename: string
    :param filename: If plot option is selected, specifying a filename here
            (e.g. 'myplot.pdf' or 'myplot.png') will output the plot to a file
            instead of opening a plot window.
    :rtype: (float, float)
    :returns: Correction time `pick2_corr` for `pick2` pick time as a float and
            corresponding correlation coefficient.
    """
    # perform some checks on the traces
    if trace1.stats.sampling_rate != trace2.stats.sampling_rate:
        msg = "Sampling rates do not match: %s != %s" % \
            (trace1.stats.sampling_rate, trace2.stats.sampling_rate)
        raise Exception(msg)
    if trace1.id != trace2.id:
        msg = "Trace ids do not match: %s != %s" % (trace1.id, trace2.id)
        warnings.warn(msg)
    samp_rate = trace1.stats.sampling_rate
    # check data, apply filter and take correct slice of traces
    slices = []
    for _i, (t, tr) in enumerate(((pick1, trace1), (pick2, trace2))):
        start = t - t_before - (cc_maxlag / 2.0)
        end = t + t_after + (cc_maxlag / 2.0)
        duration = end - start
        # check if necessary time spans are present in data
        if tr.stats.starttime > start:
            msg = "Trace %s starts too late." % _i
            raise Exception(msg)
        if tr.stats.endtime < end:
            msg = "Trace %s ends too early." % _i
            raise Exception(msg)
        if filter and start - tr.stats.starttime < duration:
            msg = "Artifacts from signal processing possible. Trace " + \
                  "%s should have more additional data at the start." % _i
            warnings.warn(msg)
        if filter and tr.stats.endtime - end < duration:
            msg = "Artifacts from signal processing possible. Trace " + \
                  "%s should have more additional data at the end." % _i
            warnings.warn(msg)
        # apply signal processing and take correct slice of data
        if filter:
            tr.data = tr.data.astype("float64")
            tr.detrend(type='demean')
            tr.data *= cosTaper(len(tr), 0.1)
            tr.filter(type=filter, **filter_options)
        slices.append(tr.slice(start, end))
    # cross correlate
    shift_len = int(cc_maxlag * samp_rate)
    _cc_shift, cc_max, cc = xcorr(slices[0].data, slices[1].data,
                                  shift_len, full_xcorr=True)
    cc_curvature = np.concatenate((np.zeros(1), np.diff(cc, 2), np.zeros(1)))
    cc_convex = np.ma.masked_where(np.sign(cc_curvature) >= 0, cc)
    cc_concave = np.ma.masked_where(np.sign(cc_curvature) < 0, cc)
    # check results of cross correlation
    if cc_max < 0:
        msg = "Absolute maximum is negative: %.3f. " % cc_max + \
              "Using positive maximum: %.3f" % max(cc)
        warnings.warn(msg)
        cc_max = max(cc)
    if cc_max < 0.8:
        msg = "Maximum of cross correlation lower than 0.8: %s" % cc_max
        warnings.warn(msg)
    # make array with time shifts in seconds corresponding to cc function
    cc_t = np.linspace(-cc_maxlag, cc_maxlag, shift_len * 2 + 1)
    # take the subportion of the cross correlation around the maximum that is
    # convex and fit a parabola.
    # use vertex as subsample resolution best cc fit.
    peak_index = cc.argmax()
    first_sample = peak_index
    # XXX this could be improved..
    while first_sample > 0 and cc_curvature[first_sample - 1] <= 0:
        first_sample -= 1
    last_sample = peak_index
    while last_sample < len(cc) - 1 and cc_curvature[last_sample + 1] <= 0:
        last_sample += 1
    if first_sample == 0 or last_sample == len(cc) - 1:
        msg = "Fitting at maximum lag. Maximum lag time should be increased."
        warnings.warn(msg)
    # work on subarrays
    num_samples = last_sample - first_sample + 1
    if num_samples < 3:
        msg = "Less than 3 samples selected for fit to cross " + \
              "correlation: %s" % num_samples
        raise Exception(msg)
    if num_samples < 5:
        msg = "Less than 5 samples selected for fit to cross " + \
              "correlation: %s" % num_samples
        warnings.warn(msg)
    # quadratic fit for small subwindow
    coeffs, residual = np.polyfit(
        cc_t[first_sample:last_sample + 1],
        cc[first_sample:last_sample + 1], deg=2, full=True)[:2]
    # check results of fit
    if coeffs[0] >= 0:
        msg = "Fitted parabola opens upwards!"
        warnings.warn(msg)
    if residual > 0.1:
        msg = "Residual in quadratic fit to cross correlation maximum " + \
              "larger than 0.1: %s" % residual
        warnings.warn(msg)
    # X coordinate of vertex of parabola gives time shift to correct
    # differential pick time. Y coordinate gives maximum correlation
    # coefficient.
    dt = -coeffs[1] / 2.0 / coeffs[0]
    coeff = (4 * coeffs[0] * coeffs[2] - coeffs[1] ** 2) / (4 * coeffs[0])
    # this is the shift to apply on the time axis of `trace2` to align the
    # traces. Actually we do not want to shift the trace to align it but we
    # want to correct the time of `pick2` so that the traces align without
    # shifting. This is the negative of the cross correlation shift.
    dt = -dt
    pick2_corr = dt
    # plot the results if selected
    if plot is True:
        import matplotlib
        if filename:
            matplotlib.use('agg')
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        tmp_t = np.linspace(0, len(slices[0]) / samp_rate, len(slices[0]))
        ax1.plot(tmp_t, slices[0].data / float(slices[0].data.max()), "k",
                 label="Trace 1")
        ax1.plot(tmp_t, slices[1].data / float(slices[1].data.max()), "r",
                 label="Trace 2")
        ax1.plot(tmp_t - dt, slices[1].data / float(slices[1].data.max()), "g",
                 label="Trace 2 (shifted)")
        ax1.legend(loc="lower right", prop={'size': "small"})
        ax1.set_title("%s" % slices[0].id)
        ax1.set_xlabel("time [s]")
        ax1.set_ylabel("norm. amplitude")
        ax2 = fig.add_subplot(212)
        ax2.plot(cc_t, cc_convex, ls="", marker=".", c="k",
                 label="xcorr (convex)")
        ax2.plot(cc_t, cc_concave, ls="", marker=".", c="0.7",
                 label="xcorr (concave)")
        ax2.plot(cc_t[first_sample:last_sample + 1],
                 cc[first_sample:last_sample + 1], "b.",
                 label="used for fitting")
        tmp_t = np.linspace(cc_t[first_sample], cc_t[last_sample],
                            num_samples * 10)
        ax2.plot(tmp_t, np.polyval(coeffs, tmp_t), "b", label="fit")
        ax2.axvline(-dt, color="g", label="vertex")
        ax2.axhline(coeff, color="g")
        ax2.set_xlabel("%.2f at %.3f seconds correction" % (coeff, -dt))
        ax2.set_ylabel("correlation coefficient")
        ax2.set_ylim(-1, 1)
        ax2.legend(loc="lower right", prop={'size': "x-small"})
        #plt.legend(loc="lower left")
        if filename:
            fig.savefig(fname=filename)
        else:
            plt.show()

    return (pick2_corr, coeff)
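The subsample estimate at the heart of the function is the vertex of a parabola fitted around the discrete cross-correlation maximum. A self-contained sketch with a synthetic correlation curve (all numbers illustrative):

import numpy as np

cc_t = np.linspace(-0.1, 0.1, 41)                 # lag axis, 5 ms spacing
cc = np.exp(-(cc_t - 0.013)**2 / (2 * 0.03**2))   # peak between two samples
k = cc.argmax()
coeffs = np.polyfit(cc_t[k - 1:k + 2], cc[k - 1:k + 2], deg=2)
t_peak = -coeffs[1] / (2.0 * coeffs[0])   # vertex x: subsample lag (~0.013 s)
cc_peak = np.polyval(coeffs, t_peak)      # vertex y: peak correlation value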
Code example #9
File: MWCS.py Project: qingkaikong/MSNoise
def mwcs(ccCurrent, ccReference, fmin, fmax, sampRate, tmin, windL, step):
    """...

    Parameters
    ----------
    ccCurrent : numpy.ndarray
        The "Current" timeseries
    ccReference : numpy.ndarray
        The "Reference" timeseries
    fmin : float
        The lower frequency bound to compute the dephasing
    fmax : float
        The higher frequency bound to compute the dephasing
    sampRate : float
        The sample rate of the input timeseries
    tmin : float
        The leftmost time lag (used to compute the "time lags array")
    windL : float
        The moving window length
    step : float
        The step to jump for the moving window


    Returns
    -------
    data : numpy.ndarray
        Taxis,deltaT,deltaErr,deltaMcoh"""

    windL = int(windL*sampRate)
    step = int(step*sampRate)
    count = 0
    deltaT = []
    deltaErr = []
    deltaMcoh = []
    Taxis = []
    padd = 2**(nextpow2(windL)+1)
    tp = cosTaper(windL, 0.02)
    timeaxis = (np.arange(len(ccCurrent)) / float(sampRate))+tmin

    minind = 0
    maxind = windL
    while maxind <= len(ccCurrent):
        ind = minind
        cci = ccCurrent[ind:(ind+windL)].copy()
        cci -= np.mean(cci)
        cci *= tp

        cri = ccReference[ind:(ind+windL)].copy()
        cri -= np.mean(cri)
        cri *= tp

        Fcur = scipy.fftpack.fft(cci, n=int(padd))[:padd // 2]
        Fref = scipy.fftpack.fft(cri, n=int(padd))[:padd // 2]

        Fcur2 = np.real(Fcur)**2 + np.imag(Fcur)**2
        Fref2 = np.real(Fref)**2 + np.imag(Fref)**2

        dcur = np.sqrt(smooth(Fcur2, window='hanning'))
        dref = np.sqrt(smooth(Fref2, window='hanning'))

        #Calculate the cross-spectrum
        X = Fref*(Fcur.conj())
        X = smooth(X, window='hanning')
        dcs = np.abs(X)

        #Find the values the frequency range of interest
        freqVec = scipy.fftpack.fftfreq(len(X)*2, 1./sampRate)[:padd // 2]
        indRange = np.argwhere(np.logical_and(freqVec >= fmin,
                                              freqVec <= fmax))

        # Get Coherence and its mean value
        coh = getCoherence(dcs, dref, dcur)
        mcoh = np.mean(coh[indRange])

        #Get Weights
        w = 1.0 / (1.0 / (coh[indRange]**2) - 1.0)
        w[coh[indRange] >= 0.99] = 1.0 / (1.0 / 0.9801 - 1.0)
        w = np.sqrt(w * np.sqrt(dcs[indRange]))
        # w /= (np.sum(w)/len(w)) #normalize
        w = np.real(w)

        #Frequency array:
        v = np.real(freqVec[indRange]) * 2 * np.pi
        # vo = np.real(freqVec) * 2 * np.pi

        #Phase:
        phi = np.angle(X)
        phi[0] = 0
        phi = np.unwrap(phi)
        # print phi[0]
        # phio = phi
        phi = phi[indRange]

        #Calculate the slope with a weighted least square linear regression
        #forced through the origin
        #weights for the WLS must be the variance !
        res = sm.regression.linear_model.WLS(phi, v, w**2).fit()

        m = np.real(res.params[0])
        deltaT.append(m)

        # print phi.shape, v.shape, w.shape
        e = np.sum((phi-m*v)**2) / (np.size(v)-1)
        s2x2 = np.sum(v**2 * w**2)
        sx2 = np.sum(w * v**2)
        e = np.sqrt(e*s2x2 / sx2**2)
        # print w.shape
        # plt.plot(vo, phio)
        # plt.scatter(v,phi,c=w)
        # plt.plot(vo,vo*m)
        # plt.xlim(-1,10)
        # plt.ylim(-5,5)
        # plt.show()

        deltaErr.append(e)
        # print m, e, res.bse[0]
        deltaMcoh.append(np.real(mcoh))
        Taxis.append(timeaxis[ind + windL // 2])
        count = count + 1

        minind += step
        maxind += step
        del Fcur, Fref
        del X
        del freqVec
        del indRange
        del w, v, e, s2x2
        del res

    if maxind > len(ccCurrent)+step:
        logging.warning("The last window was too small, but was computed")

    return np.array([Taxis, deltaT, deltaErr, deltaMcoh]).T
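The slope estimation above is a weighted least-squares fit of unwrapped cross-spectral phase against angular frequency, forced through the origin (no constant column in the design matrix), so the slope is directly the time shift. A minimal statsmodels sketch on synthetic phases:

import numpy as np
import statsmodels.api as sm

f = np.linspace(1.0, 8.0, 30)             # assumed frequency band [Hz]
v = 2 * np.pi * f                         # angular frequencies
dt_true = 2.5e-3                          # synthetic time shift [s]
phi = v * dt_true + np.random.normal(0.0, 0.01, v.size)
w = np.ones_like(v)                       # stand-in for coherence weights
res = sm.regression.linear_model.WLS(phi, v, weights=w**2).fit()
print(res.params[0])                      # recovers ~dt_true seconds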
Code example #10
def xcorrPickCorrection(pick1,
                        trace1,
                        pick2,
                        trace2,
                        t_before,
                        t_after,
                        cc_maxlag,
                        filter=None,
                        filter_options={},
                        plot=False,
                        filename=None):
    """
    Calculate the correction for the differential pick time determined by cross
    correlation of the waveforms in narrow windows around the pick times.
    For details on the fitting procedure refer to [Deichmann1992]_.

    The parameters depend on the epicentral distance and magnitude range. For
    small local earthquakes (Ml ~0-2, distance ~3-10 km) with consistent manual
    picks the following can be tried::

        t_before=0.05, t_after=0.2, cc_maxlag=0.10,
        filter="bandpass", filter_options={'freqmin': 1, 'freqmax': 20}

    The appropriate parameter sets can and should be determined/verified
    visually using the option `plot=True` on a representative set of picks.

    To get the corrected differential pick time calculate: ``((pick2 +
    pick2_corr) - pick1)``. To get a corrected differential travel time using
    origin times for both events calculate: ``((pick2 + pick2_corr - ot2) -
    (pick1 - ot1))``

    :type pick1: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param pick1: Time of pick for `trace1`.
    :type trace1: :class:`~obspy.core.trace.Trace`
    :param trace1: Waveform data for `pick1`. Add some time at front/back.
            The appropriate part of the trace is used automatically.
    :type pick2: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param pick2: Time of pick for `trace2`.
    :type trace2: :class:`~obspy.core.trace.Trace`
    :param trace2: Waveform data for `pick2`. Add some time at front/back.
            The appropriate part of the trace is used automatically.
    :type t_before: float
    :param t_before: Time to start cross correlation window before pick times
            in seconds.
    :type t_after: float
    :param t_after: Time to end cross correlation window after pick times in
            seconds.
    :type cc_maxlag: float
    :param cc_maxlag: Maximum lag time tested during cross correlation in
            seconds.
    :type filter: str
    :param filter: None for no filtering or name of filter type
            as passed on to :meth:`~obspy.core.trace.Trace.filter` if filter
            should be used. To avoid artifacts in filtering provide
            sufficiently long time series for `trace1` and `trace2`.
    :type filter_options: dict
    :param filter_options: Filter options that get passed on to
            :meth:`~obspy.core.trace.Trace.filter` if filtering is used.
    :type plot: bool
    :param plot: If ``True``, a plot window illustrating the alignment of the
            two traces and the cross correlation function around its maximum
            is shown.
    :type filename: str
    :param filename: If plot option is selected, specifying a filename here
            (e.g. 'myplot.pdf' or 'myplot.png') will output the plot to a file
            instead of opening a plot window.
    :rtype: (float, float)
    :returns: Correction time `pick2_corr` for `pick2` pick time as a float and
            corresponding correlation coefficient.
    """
    # perform some checks on the traces
    if trace1.stats.sampling_rate != trace2.stats.sampling_rate:
        msg = "Sampling rates do not match: %s != %s" % \
            (trace1.stats.sampling_rate, trace2.stats.sampling_rate)
        raise Exception(msg)
    if trace1.id != trace2.id:
        msg = "Trace ids do not match: %s != %s" % (trace1.id, trace2.id)
        warnings.warn(msg)
    samp_rate = trace1.stats.sampling_rate
    # don't modify existing traces with filters
    if filter:
        trace1 = trace1.copy()
        trace2 = trace2.copy()
    # check data, apply filter and take correct slice of traces
    slices = []
    for _i, (t, tr) in enumerate(((pick1, trace1), (pick2, trace2))):
        start = t - t_before - (cc_maxlag / 2.0)
        end = t + t_after + (cc_maxlag / 2.0)
        duration = end - start
        # check if necessary time spans are present in data
        if tr.stats.starttime > start:
            msg = "Trace %s starts too late." % _i
            raise Exception(msg)
        if tr.stats.endtime < end:
            msg = "Trace %s ends too early." % _i
            raise Exception(msg)
        if filter and start - tr.stats.starttime < duration:
            msg = "Artifacts from signal processing possible. Trace " + \
                  "%s should have more additional data at the start." % _i
            warnings.warn(msg)
        if filter and tr.stats.endtime - end < duration:
            msg = "Artifacts from signal processing possible. Trace " + \
                  "%s should have more additional data at the end." % _i
            warnings.warn(msg)
        # apply signal processing and take correct slice of data
        if filter:
            tr.data = tr.data.astype(np.float64)
            tr.detrend(type='demean')
            tr.data *= cosTaper(len(tr), 0.1)
            tr.filter(type=filter, **filter_options)
        slices.append(tr.slice(start, end))
    # cross correlate
    shift_len = int(cc_maxlag * samp_rate)
    _cc_shift, cc_max, cc = xcorr(slices[0].data,
                                  slices[1].data,
                                  shift_len,
                                  full_xcorr=True)
    cc_curvature = np.concatenate((np.zeros(1), np.diff(cc, 2), np.zeros(1)))
    cc_convex = np.ma.masked_where(np.sign(cc_curvature) >= 0, cc)
    cc_concave = np.ma.masked_where(np.sign(cc_curvature) < 0, cc)
    # check results of cross correlation
    if cc_max < 0:
        msg = "Absolute maximum is negative: %.3f. " % cc_max + \
              "Using positive maximum: %.3f" % max(cc)
        warnings.warn(msg)
        cc_max = max(cc)
    if cc_max < 0.8:
        msg = "Maximum of cross correlation lower than 0.8: %s" % cc_max
        warnings.warn(msg)
    # make array with time shifts in seconds corresponding to cc function
    cc_t = np.linspace(-cc_maxlag, cc_maxlag, shift_len * 2 + 1)
    # take the subportion of the cross correlation around the maximum that is
    # convex and fit a parabola.
    # use vertex as subsample resolution best cc fit.
    peak_index = cc.argmax()
    first_sample = peak_index
    # XXX this could be improved..
    while first_sample > 0 and cc_curvature[first_sample - 1] <= 0:
        first_sample -= 1
    last_sample = peak_index
    while last_sample < len(cc) - 1 and cc_curvature[last_sample + 1] <= 0:
        last_sample += 1
    if first_sample == 0 or last_sample == len(cc) - 1:
        msg = "Fitting at maximum lag. Maximum lag time should be increased."
        warnings.warn(msg)
    # work on subarrays
    num_samples = last_sample - first_sample + 1
    if num_samples < 3:
        msg = "Less than 3 samples selected for fit to cross " + \
              "correlation: %s" % num_samples
        raise Exception(msg)
    if num_samples < 5:
        msg = "Less than 5 samples selected for fit to cross " + \
              "correlation: %s" % num_samples
        warnings.warn(msg)
    # quadratic fit for small subwindow
    coeffs, residual = np.polyfit(cc_t[first_sample:last_sample + 1],
                                  cc[first_sample:last_sample + 1],
                                  deg=2,
                                  full=True)[:2]
    # check results of fit
    if coeffs[0] >= 0:
        msg = "Fitted parabola opens upwards!"
        warnings.warn(msg)
    if residual > 0.1:
        msg = "Residual in quadratic fit to cross correlation maximum " + \
              "larger than 0.1: %s" % residual
        warnings.warn(msg)
    # X coordinate of vertex of parabola gives time shift to correct
    # differential pick time. Y coordinate gives maximum correlation
    # coefficient.
    dt = -coeffs[1] / 2.0 / coeffs[0]
    coeff = (4 * coeffs[0] * coeffs[2] - coeffs[1]**2) / (4 * coeffs[0])
    # this is the shift to apply on the time axis of `trace2` to align the
    # traces. Actually we do not want to shift the trace to align it but we
    # want to correct the time of `pick2` so that the traces align without
    # shifting. This is the negative of the cross correlation shift.
    dt = -dt
    pick2_corr = dt
    # plot the results if selected
    if plot is True:
        import matplotlib
        if filename:
            matplotlib.use('agg')
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        tmp_t = np.linspace(0, len(slices[0]) / samp_rate, len(slices[0]))
        ax1.plot(tmp_t,
                 slices[0].data / float(slices[0].data.max()),
                 "k",
                 label="Trace 1")
        ax1.plot(tmp_t,
                 slices[1].data / float(slices[1].data.max()),
                 "r",
                 label="Trace 2")
        ax1.plot(tmp_t - dt,
                 slices[1].data / float(slices[1].data.max()),
                 "g",
                 label="Trace 2 (shifted)")
        ax1.legend(loc="lower right", prop={'size': "small"})
        ax1.set_title("%s" % slices[0].id)
        ax1.set_xlabel("time [s]")
        ax1.set_ylabel("norm. amplitude")
        ax2 = fig.add_subplot(212)
        ax2.plot(cc_t,
                 cc_convex,
                 ls="",
                 marker=".",
                 c="k",
                 label="xcorr (convex)")
        ax2.plot(cc_t,
                 cc_concave,
                 ls="",
                 marker=".",
                 c="0.7",
                 label="xcorr (concave)")
        ax2.plot(cc_t[first_sample:last_sample + 1],
                 cc[first_sample:last_sample + 1],
                 "b.",
                 label="used for fitting")
        tmp_t = np.linspace(cc_t[first_sample], cc_t[last_sample],
                            num_samples * 10)
        ax2.plot(tmp_t, np.polyval(coeffs, tmp_t), "b", label="fit")
        ax2.axvline(-dt, color="g", label="vertex")
        ax2.axhline(coeff, color="g")
        ax2.set_xlabel("%.2f at %.3f seconds correction" % (coeff, -dt))
        ax2.set_ylabel("correlation coefficient")
        ax2.set_ylim(-1, 1)
        ax2.legend(loc="lower right", prop={'size': "x-small"})
        # plt.legend(loc="lower left")
        if filename:
            fig.savefig(fname=filename)
        else:
            plt.show()

    return (pick2_corr, coeff)
Code example #11
File: MWCS.py Project: BenjaminFores/MSNoise
def mwcs(ccCurrent, ccReference, fmin, fmax, sampRate, tmin, windL, step,
         plot=False):
    """...

    :type ccCurrent: :class:`numpy.ndarray`
    :param ccCurrent: The "Current" timeseries
    :type ccReference: :class:`numpy.ndarray`
    :param ccReference: The "Reference" timeseries
    :type fmin: float
    :param fmin: The lower frequency bound to compute the dephasing
    :type fmax: float
    :param fmax: The higher frequency bound to compute the dephasing
    :type sampRate: float
    :param sampRate: The sample rate of the input timeseries
    :type tmin: float
    :param tmin: The leftmost time lag (used to compute the "time lags array")
    :type windL: float
    :param windL: The moving window length
    :type step: float
    :param step: The step to jump for the moving window
    :type plot: bool
    :param plot: If True, plots the MWCS result for each window. Defaults to
        False

    :rtype: :class:`numpy.ndarray`
    :returns: [Taxis,deltaT,deltaErr,deltaMcoh]. Taxis contains the central
        times of the windows. The three other columns contain dt, error and
        mean coherence for each window.
    """

    windL = int(windL*sampRate)
    step = int(step*sampRate)
    count = 0
    deltaT = []
    deltaErr = []
    deltaMcoh = []
    Taxis = []
    padd = 2**(nextpow2(windL)+2)

    # Tentatively check whether enough points are used to compute the FFT
    freqVec = scipy.fftpack.fftfreq(int(padd), 1./sampRate)[:int(padd) // 2]
    indRange = np.argwhere(np.logical_and(freqVec >= fmin,
                                          freqVec <= fmax))
    if len(indRange) < 2:
        padd = 2**(nextpow2(windL)+3)

    tp = cosTaper(windL, .85)

    timeaxis = (np.arange(len(ccCurrent)) / float(sampRate))+tmin
    minind = 0
    maxind = windL
    while maxind <= len(ccCurrent):
        ind = minind

        cci = ccCurrent[ind:(ind+windL)].copy()
        cci = scipy.signal.detrend(cci, type='linear')
        cci -= cci.min()
        cci /= cci.max()
        cci -= np.mean(cci)
        cci *= tp

        cri = ccReference[ind:(ind+windL)].copy()
        cri = scipy.signal.detrend(cri, type='linear')
        cri -= cri.min()
        cri /= cri.max()
        cri -= np.mean(cri)
        cri *= tp

        Fcur = scipy.fftpack.fft(cci, n=int(padd))[:int(padd) // 2]
        Fref = scipy.fftpack.fft(cri, n=int(padd))[:int(padd) // 2]

        Fcur2 = np.real(Fcur)**2 + np.imag(Fcur)**2
        Fref2 = np.real(Fref)**2 + np.imag(Fref)**2

        smoother = 5

        dcur = np.sqrt(smooth(Fcur2, window='hanning', half_win=smoother))
        dref = np.sqrt(smooth(Fref2, window='hanning', half_win=smoother))

        # Calculate the cross-spectrum
        X = Fref*(Fcur.conj())
        X = smooth(X, window='hanning', half_win=smoother)
        dcs = np.abs(X)

        # Find the values the frequency range of interest
        freqVec = scipy.fftpack.fftfreq(len(X)*2, 1./sampRate)[:int(padd) // 2]
        indRange = np.argwhere(np.logical_and(freqVec >= fmin,
                                              freqVec <= fmax))

        # Get Coherence and its mean value
        coh = getCoherence(dcs, dref, dcur)
        mcoh = np.mean(coh[indRange])

        # Get Weights
        w = 1.0 / (1.0 / (coh[indRange]**2) - 1.0)
        w[coh[indRange] >= 0.99] = 1.0 / (1.0 / 0.9801 - 1.0)
        w = np.sqrt(w * np.sqrt(dcs[indRange]))
        # w /= (np.sum(w)/len(w)) #normalize
        w = np.real(w)

        # Frequency array:
        v = np.real(freqVec[indRange]) * 2 * np.pi
        vo = np.real(freqVec) * 2 * np.pi

        # Phase:
        phi = np.angle(X)
        phi[0] = 0.
        phi = np.unwrap(phi)
        phio = phi.copy()  # kept: needed by the plot branch below
        phi = phi[indRange]

        # Calculate the slope with a weighted least square linear regression
        # forced through the origin
        # weights for the WLS must be the variance !
        res = sm.regression.linear_model.WLS(phi, v, w**2).fit()

        # print "forced", np.real(res.params[0])
        # print "!forced", np.real(res2.params[0])

        m = np.real(res.params[0])
        deltaT.append(m)

        # print phi.shape, v.shape, w.shape
        e = np.sum((phi-m*v)**2) / (np.size(v)-1)
        s2x2 = np.sum(v**2 * w**2)
        sx2 = np.sum(w * v**2)
        e = np.sqrt(e*s2x2 / sx2**2)
        # print w.shape
        if plot:
            plt.figure()
            plt.suptitle('%.1fs' % (timeaxis[ind + windL // 2]))
            plt.subplot(311)
            plt.plot(cci)
            plt.plot(cri)
            ax = plt.subplot(312)
            plt.plot(vo/(2*np.pi), phio)
            plt.scatter(v/(2*np.pi), phi, c=w, edgecolor='none',
                        vmin=0.6, vmax=1)
            plt.subplot(313, sharex=ax)
            plt.plot(v/(2*np.pi), coh[indRange])
            plt.axhline(mcoh, c='r')
            plt.axhline(1.0, c='k', ls='--')
            plt.xlim(-0.1, 1.5)
            plt.ylim(0, 1.5)
            plt.show()

        deltaErr.append(e)
        deltaMcoh.append(np.real(mcoh))
        Taxis.append(timeaxis[ind + windL // 2])
        count += 1

        minind += step
        maxind += step
        del Fcur, Fref
        del X
        del freqVec
        del indRange
        del w, v, e, s2x2
        del res

    if maxind > len(ccCurrent)+step:
        logging.warning("The last window was too small, but was computed")

    return np.array([Taxis, deltaT, deltaErr, deltaMcoh]).T
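The weighting used in all the mwcs variants maps coherence to a signal-to-noise-style weight, capped at coherence 0.99 to avoid dividing by ~0. In isolation:

import numpy as np

coh = np.array([0.3, 0.7, 0.95, 0.999])
w = 1.0 / (1.0 / np.minimum(coh, 0.99)**2 - 1.0)
print(w)   # low coherence -> small weight; the cap bounds w at ~49.25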
Code example #12
def mwcs(ccCurrent, ccReference, fmin, fmax, sampRate, tmin, windL, step):
    windL = int(windL * sampRate)
    step = int(step * sampRate)
    count = 0
    deltaT = []
    deltaErr = []
    deltaMcoh = []
    Taxis = []

    padd = 2**(nextpow2(windL) + 1)

    tp = cosTaper(windL, 0.02)
    timeaxis = (np.arange(len(ccCurrent)) / float(sampRate)) + tmin

    minind = 0
    maxind = windL
    while maxind <= len(ccCurrent):
        # print minind, maxind, timeaxis[np.mean([minind, maxind])], timeaxis[minind + windL/2]
        ind = minind
        cci = ccCurrent[ind:(ind + windL)].copy()
        cci -= np.mean(cci)
        cci *= tp

        cri = ccReference[ind:(ind + windL)].copy()
        cri -= np.mean(cri)
        cri *= tp

        Fcur = scipy.fftpack.fft(cci, n=int(padd))[:padd // 2]
        Fref = scipy.fftpack.fft(cri, n=int(padd))[:padd // 2]

        Fcur2 = np.real(Fcur)**2 + np.imag(Fcur)**2
        Fref2 = np.real(Fref)**2 + np.imag(Fref)**2

        dcur = np.sqrt(smooth(Fcur2, type='hanning'))
        dref = np.sqrt(smooth(Fref2, type='hanning'))

        #Calculate the cross-spectrum
        X = Fref * (Fcur.conj())

        X = smooth(X, type='hanning')

        dcs = np.abs(X)

        #Find the values the frequency range of interest
        freqVec = scipy.fftpack.fftfreq(len(X) * 2, 1. / sampRate)[:padd // 2]
        indRange = np.argwhere(np.logical_and(freqVec >= fmin,
                                              freqVec <= fmax))

        # Get Coherence and its mean value
        coh = getCoherence(dcs, dref, dcur)
        mcoh = np.mean(coh[indRange])

        #Get Weights
        w = 1.0 / (1.0 / (coh[indRange]**2) - 1.0)
        w[coh[indRange] >= 0.99] = 1.0 / (1.0 / 0.9801 - 1.0)
        w = np.sqrt(w * np.sqrt(dcs[indRange]))
        # w /= (np.sum(w)/len(w)) #normalize
        w = np.real(w)

        #Frequency array:
        v = np.real(freqVec[indRange]) * 2 * np.pi
        # vo = np.real(freqVec) * 2 * np.pi

        #Phase:
        phi = np.angle(X)
        phi[0] = 0
        phi = np.unwrap(phi)
        # print phi[0]
        # phio = phi
        phi = phi[indRange]

        #Calculate the slope with a weighted least square linear regression forced through the origin
        #weights for the WLS must be the variance !
        res = sm.regression.linear_model.WLS(phi, v, w**2).fit()

        m = np.real(res.params[0])
        deltaT.append(m)

        # print phi.shape, v.shape, w.shape
        e = np.sum((phi - m * v)**2) / (np.size(v) - 1)
        s2x2 = np.sum(v**2 * w**2)
        sx2 = np.sum(w * v**2)
        e = np.sqrt(e * s2x2 / sx2**2)
        # print w.shape
        # plt.plot(vo, phio)
        # plt.scatter(v,phi,c=w)
        # plt.plot(vo,vo*m)
        # plt.xlim(-1,10)
        # plt.ylim(-5,5)
        # plt.show()

        deltaErr.append(e)
        # print m, e, res.bse[0]
        deltaMcoh.append(np.real(mcoh))
        Taxis.append(timeaxis[ind + windL // 2])
        count = count + 1

        minind += step
        maxind += step
        del Fcur, Fref
        del X
        del freqVec
        del indRange
        del w, v, e, s2x2
        del res

    if maxind > len(ccCurrent) + step:
        logging.warning("The last window was too small, but was computed")

    return np.array([Taxis, deltaT, deltaErr, deltaMcoh]).T
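getCoherence itself is not shown on this page; in MSNoise-era code it is, roughly, the smoothed cross-spectral amplitude normalised by the two smoothed auto-spectral amplitudes, clipped at 1. A hedged reconstruction (not the verbatim MSNoise source):

import numpy as np

def get_coherence(dcs, dref, dcur):
    # dcs: |smoothed cross-spectrum|; dref, dcur: smoothed amplitude spectra
    coh = np.zeros(len(dcs))
    ok = (dref > 0) & (dcur > 0)
    coh[ok] = dcs[ok] / (dref[ok] * dcur[ok])
    return np.clip(coh, 0.0, 1.0)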
Code example #13
File: s03compute_cc.py Project: cgd27/MSNoise
                     rms_threshold = filterdb.rms_threshold
                     # print "Filter Bounds used:", filterid, low, high
                     # Npts = min30
                     # Nc = 2* Npts - 1
                     # Nfft = 2**nextpow2(Nc)
 
                     Nfft = min30
                     if min30 / 2 % 2 != 0:
                         Nfft = min30 + 2
 
                     trames2hWb = np.zeros((2, int(Nfft)), dtype=complex)
                     for i, station in enumerate(pair):
                         # print "USING rms threshold = %f" % rms_threshold
                         # logging.debug("rmsmat[i] = %f" % rmsmat[i])
                         if rmsmat[i] > rms_threshold:
                             cp = cosTaper(len(trame2h[i]),0.04)
                             
                             if windsorizing != 0:
                                 indexes = np.where(
                                     np.abs(trame2h[i]) > (windsorizing * rmsmat[i]))[0]
                                 # clipping at windsorizing*rms
                                 trame2h[i][indexes] = (trame2h[i][indexes] / np.abs(
                                     trame2h[i][indexes])) * windsorizing * rmsmat[i]
 
                             # logging.debug('whiten')
 
                             trames2hWb[i] = whiten(
                                 trame2h[i]*cp, Nfft, dt, low, high, plot=False)
                         else:
                             # logging.debug("Station no %d, pas de pretraitement car rms < %f ou NaN"% (i, rms_threshold))
                             trames2hWb[i] = np.zeros(int(Nfft))
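The windsorizing block above clips any sample whose amplitude exceeds windsorizing * rms to that bound while keeping its sign; the division-by-absolute-value trick is equivalent to a plain np.clip. A standalone sketch (function name illustrative):

import numpy as np

def windsorize(data, n_rms=3.0):
    rms = np.sqrt(np.mean(data**2))
    return np.clip(data, -n_rms * rms, n_rms * rms)

x = np.random.randn(1000)
x[::100] *= 50.0           # inject spikes
y = windsorize(x)          # spikes clipped at +/- 3*rms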
Code example #14
stream = read(file)

for tr in stream:
    # get poles, zeros, sensitivity and gain
    paz = sp.getPAZ(tr.stats.channel)
    # Uncomment the following for:
    # Integrate by adding a zero at the position zero
    # As for the simulation the poles and zeros are inverted and convolved
    # in the frequency domain this is basically mutliplying by 1/jw which
    # is an integration in the frequency domain
    # See "Of Poles and Zeros", Frank Scherbaum, Springer 2007
    #paz['zeros'].append(0j)
    # preprocessing
    tr.data = tr.data.astype('float64')     #convert data to float
    tr.data = detrend(tr.data, 'linear')    #detrend
    tr.data *= cosTaper(tr.stats.npts, 0.10) #costaper 5% at start and end
    # correct for instrument, play with water_level
    # this will results to unit of XSEEDs tag stage_signal_output_units
    # most common for seed is m/s, write xseed by sp.writeXSEED('xs.txt')
    tr.data = seisSim(tr.data, tr.stats.sampling_rate, paz, inst_sim=None, 
                      water_level=60.0)
    tr.data = tr.data/paz['sensitivity']
    # You may need to do postprocessing: the low frequencies are most likely
    # artefacts (they result from dividing by the freqresp with a high
    # water_level), so use a highpass to get rid of them, e.g. at 2.0 Hz:
    #tr.data = highpass(tr.data, 2.0, df=tr.stats.sampling_rate, corners=2)


#
# the plotting part
#
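The commented-out highpass is the post-processing step the note above recommends. A hedged zero-phase variant built on scipy (obspy's highpass wraps a similar Butterworth design; names here are illustrative):

import numpy as np
from scipy.signal import butter, filtfilt

def highpass(data, freq, df, corners=2):
    # Butterworth highpass at `freq` Hz for data sampled at `df` Hz,
    # applied forward and backward (zero phase)
    b, a = butter(corners, freq / (0.5 * df), btype="highpass")
    return filtfilt(b, a, data)

# clean = highpass(tr.data, 2.0, df=tr.stats.sampling_rate)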
Code example #15
File: MWCS.py Project: hejunzhu/MSNoise
def mwcs(ccCurrent,
         ccReference,
         fmin,
         fmax,
         sampRate,
         tmin,
         windL,
         step,
         plot=False):
    """...

    :type ccCurrent: :class:`numpy.ndarray`
    :param ccCurrent: The "Current" timeseries
    :type ccReference: :class:`numpy.ndarray`
    :param ccReference: The "Reference" timeseries
    :type fmin: float
    :param fmin: The lower frequency bound to compute the dephasing
    :type fmax: float
    :param fmax: The higher frequency bound to compute the dephasing
    :type sampRate: float
    :param sampRate: The sample rate of the input timeseries
    :type tmin: float
    :param tmin: The leftmost time lag (used to compute the "time lags array")
    :type windL: float
    :param windL: The moving window length
    :type step: float
    :param step: The step to jump for the moving window
    :type plot: bool
    :param plot: If True, plots the MWCS result for each window. Defaults to
        False

    :rtype: :class:`numpy.ndarray`
    :returns: [Taxis,deltaT,deltaErr,deltaMcoh]. Taxis contains the central
        times of the windows. The three other columns contain dt, error and
        mean coherence for each window.
    """

    windL = int(windL * sampRate)
    step = int(step * sampRate)
    count = 0
    deltaT = []
    deltaErr = []
    deltaMcoh = []
    Taxis = []
    padd = 2**(nextpow2(windL) + 2)

    # Tentatively check whether enough points are used to compute the FFT
    freqVec = scipy.fftpack.fftfreq(int(padd), 1. / sampRate)[:int(padd) // 2]
    indRange = np.argwhere(np.logical_and(freqVec >= fmin, freqVec <= fmax))
    if len(indRange) < 2:
        padd = 2**(nextpow2(windL) + 3)

    tp = cosTaper(windL, .85)

    timeaxis = (np.arange(len(ccCurrent)) / float(sampRate)) + tmin
    minind = 0
    maxind = windL
    while maxind <= len(ccCurrent):
        ind = minind

        cci = ccCurrent[ind:(ind + windL)].copy()
        cci = scipy.signal.detrend(cci, type='linear')
        cci -= cci.min()
        cci /= cci.max()
        cci -= np.mean(cci)
        cci *= tp

        cri = ccReference[ind:(ind + windL)].copy()
        cri = scipy.signal.detrend(cri, type='linear')
        cri -= cri.min()
        cri /= cri.max()
        cri -= np.mean(cri)
        cri *= tp

        Fcur = scipy.fftpack.fft(cci, n=int(padd))[:int(padd) // 2]
        Fref = scipy.fftpack.fft(cri, n=int(padd))[:int(padd) // 2]

        Fcur2 = np.real(Fcur)**2 + np.imag(Fcur)**2
        Fref2 = np.real(Fref)**2 + np.imag(Fref)**2

        smoother = 5

        dcur = np.sqrt(smooth(Fcur2, window='hanning', half_win=smoother))
        dref = np.sqrt(smooth(Fref2, window='hanning', half_win=smoother))

        # Calculate the cross-spectrum
        X = Fref * (Fcur.conj())
        X = smooth(X, window='hanning', half_win=smoother)
        dcs = np.abs(X)

        # Find the values the frequency range of interest
        freqVec = scipy.fftpack.fftfreq(len(X) * 2,
                                        1. / sampRate)[:int(padd) // 2]
        indRange = np.argwhere(np.logical_and(freqVec >= fmin,
                                              freqVec <= fmax))

        # Get Coherence and its mean value
        coh = getCoherence(dcs, dref, dcur)
        mcoh = np.mean(coh[indRange])

        # Get Weights
        w = 1.0 / (1.0 / (coh[indRange]**2) - 1.0)
        w[coh[indRange] >= 0.99] = 1.0 / (1.0 / 0.9801 - 1.0)
        w = np.sqrt(w * np.sqrt(dcs[indRange]))
        # w /= (np.sum(w)/len(w)) #normalize
        w = np.real(w)

        # Frequency array:
        v = np.real(freqVec[indRange]) * 2 * np.pi
        vo = np.real(freqVec) * 2 * np.pi

        # Phase:
        phi = np.angle(X)
        phi[0] = 0.
        phi = np.unwrap(phi)
        phio = phi.copy()  # kept: needed by the plot branch below
        phi = phi[indRange]

        # Calculate the slope with a weighted least square linear regression
        # forced through the origin
        # weights for the WLS must be the variance !
        res = sm.regression.linear_model.WLS(phi, v, w**2).fit()

        # print "forced", np.real(res.params[0])
        # print "!forced", np.real(res2.params[0])

        m = np.real(res.params[0])
        deltaT.append(m)

        # print phi.shape, v.shape, w.shape
        e = np.sum((phi - m * v)**2) / (np.size(v) - 1)
        s2x2 = np.sum(v**2 * w**2)
        sx2 = np.sum(w * v**2)
        e = np.sqrt(e * s2x2 / sx2**2)
        # print w.shape
        if plot:
            plt.figure()
            plt.suptitle('%.1fs' % (timeaxis[ind + windL // 2]))
            plt.subplot(311)
            plt.plot(cci)
            plt.plot(cri)
            ax = plt.subplot(312)
            plt.plot(vo / (2 * np.pi), phio)
            plt.scatter(v / (2 * np.pi),
                        phi,
                        c=w,
                        edgecolor='none',
                        vmin=0.6,
                        vmax=1)
            plt.subplot(313, sharex=ax)
            plt.plot(v / (2 * np.pi), coh[indRange])
            plt.axhline(mcoh, c='r')
            plt.axhline(1.0, c='k', ls='--')
            plt.xlim(-0.1, 1.5)
            plt.ylim(0, 1.5)
            plt.show()

        deltaErr.append(e)
        deltaMcoh.append(np.real(mcoh))
        Taxis.append(timeaxis[ind + windL // 2])
        count += 1

        minind += step
        maxind += step
        del Fcur, Fref
        del X
        del freqVec
        del indRange
        del w, v, e, s2x2
        del res

    if maxind > len(ccCurrent) + step:
        logging.warning("The last window was too small, but was computed")

    return np.array([Taxis, deltaT, deltaErr, deltaMcoh]).T
Code example #16
File: stalta4kw_st.py Project: obspy/branches
summary.append("######## %s  ---  %s ########" % (T1, T2))
summary.append("#" * 79)
summary.append(st.__str__(extended=True))
if exceptions:
    summary.append("#" * 33 + " Exceptions  " + "#" * 33)
    summary += exceptions
summary.append("#" * 79)

trig = []
mutt = []
if st:
    # preprocessing, backup original data for plotting at end
    st.merge(0)
    st.detrend("linear")
    for tr in st:
        tr.data = tr.data * cosTaper(len(tr), 0.01)
    #st.simulate(paz_remove="self", paz_simulate=cornFreq2Paz(1.0), remove_sensitivity=False)
    st.sort()
    st.filter("bandpass", freqmin=PAR.LOW, freqmax=PAR.HIGH, corners=1, zerophase=True)
    st.trim(T1, T2)
    st_trigger = st.copy()
    st.normalize(global_max=False)
    # do the triggering
    trig = coincidenceTrigger("recstalta", PAR.ON, PAR.OFF, st_trigger,
            thr_coincidence_sum=PAR.MIN_STATIONS,
            max_trigger_length=PAR.MAXLEN, trigger_off_extension=PAR.ALLOWANCE,
            details=True, sta=PAR.STA, lta=PAR.LTA)

    for t in trig:
        info = "%s %ss %s %s" % (t['time'].strftime("%Y-%m-%dT%H:%M:%S"), ("%.1f" % t['duration']).rjust(4), ("%i" % t['cft_peak_wmean']).rjust(3), "-".join(t['stations']))
        summary.append(info)
Code example #17
File: 03.compute_cc.py Project: pgervais/MSNoise
    for istation, station in enumerate(stations):
        for comp in comps:
            files = eval("datafiles%s['%s']"%(comp,station))
            if len(files) != 0:
                logging.debug("%s.%s Reading %i Files" % (station, comp, len(files)))
                stream = Stream()
                for file in sorted(files):
                    st = read(file,format="MSEED")
                    stream += st
                    del st
                stream.merge()
                stream = stream.split()
                for trace in stream:
                    data = trace.data
                    if len(data) > 2:
                        tp = cosTaper(len(data), 0.01 )
                        data -= np.mean(data)
                        data *= tp
                        trace.data = data
                    else:
                        trace.data *= 0
                    del data
                logging.debug("%s.%s Merging Stream" % (station, comp))
                stream.merge(fill_value=0) #fills gaps with 0s and gives only one 'Trace'
                logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp,utcdatetime.UTCDateTime(goal_day.replace('-','')),utcdatetime.UTCDateTime(goal_day.replace('-',''))+goal_duration-stream[0].stats.delta))
                
                stream[0].trim(utcdatetime.UTCDateTime(goal_day.replace('-','')),utcdatetime.UTCDateTime(goal_day.replace('-',''))+goal_duration-stream[0].stats.delta, pad=True,fill_value=0.0)
                trace = stream[0]

                data = trace.data
                freq = preprocess_lowpass
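The split/taper/merge pattern above can be reproduced on a synthetic gappy stream. A hedged sketch using the modern obspy Trace.taper API, where max_percentage=0.005 roughly matches cosTaper(n, 0.01); names and times are illustrative:

import numpy as np
from obspy import Stream, Trace

tr1 = Trace(np.random.randn(1000))
tr2 = Trace(np.random.randn(1000))
tr2.stats.starttime = tr1.stats.endtime + 30.0   # 30 s gap (1 Hz default)
st = Stream([tr1, tr2])
for tr in st:
    tr.data -= tr.data.mean()
    tr.taper(max_percentage=0.005, type="cosine")
st.merge(fill_value=0)    # the gap becomes zeros; one continuous Trace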
Code example #18
File: stalta4uh_st.py Project: obspy/branches
summary.append("######## %s  ---  %s ########" % (T1, T2))
summary.append("#" * 79)
summary.append(st.__str__(extended=True))
if exceptions:
    summary.append("#" * 33 + " Exceptions  " + "#" * 33)
    summary += exceptions
summary.append("#" * 79)

trig = []
mutt = []
if st:
    # preprocessing, backup original data for plotting at end
    st.merge(0)
    st.detrend("linear")
    for tr in st:
        tr.data = tr.data * cosTaper(len(tr), 0.01)
    #st.simulate(paz_remove="self", paz_simulate=cornFreq2Paz(1.0), remove_sensitivity=False)
    st.sort()
    st.filter("bandpass", freqmin=PAR.LOW, freqmax=PAR.HIGH, corners=1, zerophase=True)
    st.trim(T1, T2)
    st_trigger = st.copy()
    st.normalize(global_max=False)
    # do the triggering
    trig = coincidenceTrigger("recstalta", PAR.ON, PAR.OFF, st_trigger,
            thr_coincidence_sum=PAR.MIN_STATIONS,
            max_trigger_length=PAR.MAXLEN, trigger_off_extension=PAR.ALLOWANCE,
            details=True, sta=PAR.STA, lta=PAR.LTA)

    for t in trig:
        info = "%s %ss %s %s" % (t['time'].strftime("%Y-%m-%dT%H:%M:%S"), ("%.1f" % t['duration']).rjust(4), ("%i" % t['cft_peak_wmean']).rjust(3), "-".join(t['stations']))
        summary.append(info)
Code example #19
File: 03.compute_cc.py Project: pgervais/MSNoise
 for comp in comps:
     files = eval("datafiles%s['%s']" % (comp, station))
     if len(files) != 0:
         logging.debug("%s.%s Reading %i Files" %
                       (station, comp, len(files)))
         stream = Stream()
         for file in sorted(files):
             st = read(file, format="MSEED")
             stream += st
             del st
         stream.merge()
         stream = stream.split()
         for trace in stream:
             data = trace.data
             if len(data) > 2:
                 tp = cosTaper(len(data), 0.01)
                 data -= np.mean(data)
                 data *= tp
                 trace.data = data
             else:
                 trace.data *= 0
             del data
         logging.debug("%s.%s Merging Stream" % (station, comp))
         stream.merge(fill_value=0)  # fills gaps with 0s and gives only one 'Trace'
         logging.debug(
             "%s.%s Slicing Stream to %s:%s" %
             (station, comp,
              utcdatetime.UTCDateTime(goal_day.replace('-', '')),
              utcdatetime.UTCDateTime(goal_day.replace('-', '')) +
              goal_duration - stream[0].stats.delta))