Example #1
 def test_lowpass_vs_pitsa(self):
     """
     Test Butterworth lowpass filter against Butterworth lowpass filter of
     PITSA. Note that the corners value is twice the value of the filter
     sections in PITSA. The rms of the difference between ObsPy and PITSA
     tends to get bigger with higher order filtering.
     """
     # load test file
     filename = os.path.join(self.path, 'rjob_20051006.gz')
     with gzip.open(filename) as f:
         data = np.loadtxt(f)
     # parameters for the test
     samp_rate = 200.0
     freq = 5
     corners = 4
     # filter trace
     datcorr = lowpass(data, freq, df=samp_rate, corners=corners)
     # load pitsa file
     filename = os.path.join(self.path, 'rjob_20051006_lowpass.gz')
     with gzip.open(filename) as f:
         data_pitsa = np.loadtxt(f)
     # calculate normalized rms
     rms = np.sqrt(
         np.sum((datcorr - data_pitsa)**2) / np.sum(data_pitsa**2))
     self.assertTrue(rms < 1.0e-05)
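The tests in this listing all rely on the same normalized-RMS misfit, sqrt(sum((a-b)^2)/sum(b^2)). As a hypothetical standalone sketch (the helper name normalized_rms is ours, not ObsPy's), the comparison can be factored out:

import numpy as np

def normalized_rms(candidate, reference):
    # RMS of the difference, normalized by the energy of the reference trace
    return np.sqrt(np.sum((candidate - reference) ** 2) /
                   np.sum(reference ** 2))

# usage: assert normalized_rms(datcorr, data_pitsa) < 1.0e-05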
Example #2
    def _post_processing(self, data_to_process):
        #wavelength = 1550.0e-9
        ## Read the channels from the data
        channel1 = np.array(data_to_process[0]).astype(TYPE)
        channel2 = np.array(data_to_process[1]).astype(TYPE)
        #channel3 = np.array(data_to_process[2]).astypye(float)
        times = np.arange(0, len(channel1[0])) * (1 / self.sampling_rate)

        ##call vfm for processing the data on each record
        y_shift = self._config['y_shift']
        processed = np.array([
            _vfm(channel1[i] + y_shift, channel2[i] + y_shift, times)
            for i in range(len(channel1))
        ])
        ##average and lowpass the processed data
        processed_avg = lowpass(processed.mean(axis=0),
                                self.lowpass_cutoff,
                                self.sampling_rate,
                                corners=4,
                                zerophase=True)
        # make a numpy array for our data
        new_data = np.array((1, ),
                            dtype=[(FIELD, TYPE, len(processed_avg) + 1)])
        ## copy the processed data to the numpy array
        new_data[FIELD] = np.append(processed_avg, processed_avg[-1])
        return new_data, times
Example #3
 def test_lowpassZPHSHVsPitsa(self):
     """
     Test Butterworth zero-phase lowpass filter against Butterworth
     zero-phase lowpass filter of PITSA. Note that the corners value is
     twice the value of the filter sections in PITSA. The rms of the
     difference between ObsPy and PITSA tends to get bigger with higher
     order filtering.
     Note: The Zero-Phase filters deviate from PITSA's zero-phase filters
     at the end of the trace! The rms for the test is calculated omitting
     the last 200 samples, as this part of the trace is assumed to
     generally be of low interest/importance.
     """
     # load test file
     filename = os.path.join(self.path, 'rjob_20051006.gz')
     with gzip.open(filename) as f:
         data = np.loadtxt(f)
     # parameters for the test
     samp_rate = 200.0
     freq = 5
     corners = 2
     # filter trace
     datcorr = lowpass(data, freq, df=samp_rate, corners=corners,
                       zerophase=True)
     # load pitsa file
     filename = os.path.join(self.path, 'rjob_20051006_lowpassZPHSH.gz')
     with gzip.open(filename) as f:
         data_pitsa = np.loadtxt(f)
     # calculate normalized rms
     rms = np.sqrt(np.sum((datcorr[:-200] - data_pitsa[:-200]) ** 2) /
                   np.sum(data_pitsa[:-200] ** 2))
     self.assertTrue(rms < 1.0e-05)
Example #4
 def test_lowpassVsPitsa(self):
     """
     Test Butterworth lowpass filter against Butterworth lowpass filter of
     PITSA. Note that the corners value is twice the value of the filter
     sections in PITSA. The rms of the difference between ObsPy and PITSA
     tends to get bigger with higher order filtering.
     """
     # load test file
     filename = os.path.join(self.path, 'rjob_20051006.gz')
     with gzip.open(filename) as f:
         data = np.loadtxt(f)
     # parameters for the test
     samp_rate = 200.0
     freq = 5
     corners = 4
     # filter trace
     datcorr = lowpass(data, freq, df=samp_rate, corners=corners)
     # load pitsa file
     filename = os.path.join(self.path, 'rjob_20051006_lowpass.gz')
     with gzip.open(filename) as f:
         data_pitsa = np.loadtxt(f)
     # calculate normalized rms
     rms = np.sqrt(np.sum((datcorr - data_pitsa) ** 2) /
                   np.sum(data_pitsa ** 2))
     self.assertTrue(rms < 1.0e-05)
Example #5
 def test_lowpass_zphsh_vs_pitsa(self):
     """
     Test Butterworth zero-phase lowpass filter against Butterworth
     zero-phase lowpass filter of PITSA. Note that the corners value is
     twice the value of the filter sections in PITSA. The rms of the
     difference between ObsPy and PITSA tends to get bigger with higher
     order filtering.
     Note: The Zero-Phase filters deviate from PITSA's zero-phase filters
     at the end of the trace! The rms for the test is calculated omitting
     the last 200 samples, as this part of the trace is assumed to
     generally be of low interest/importance.
     """
     # load test file
     filename = os.path.join(self.path, 'rjob_20051006.gz')
     with gzip.open(filename) as f:
         data = np.loadtxt(f)
     # parameters for the test
     samp_rate = 200.0
     freq = 5
     corners = 2
     # filter trace
     datcorr = lowpass(data,
                       freq,
                       df=samp_rate,
                       corners=corners,
                       zerophase=True)
     # load pitsa file
     filename = os.path.join(self.path, 'rjob_20051006_lowpassZPHSH.gz')
     with gzip.open(filename) as f:
         data_pitsa = np.loadtxt(f)
     # calculate normalized rms
     rms = np.sqrt(
         np.sum((datcorr[:-200] - data_pitsa[:-200])**2) /
         np.sum(data_pitsa[:-200]**2))
     self.assertTrue(rms < 1.0e-05)
Example #6
def filtering(st, ty, args):

    hip = args.highpass 
    lop = args.lowpass 
    bdp = args.bandpass 

    if hip != "0":
       elements = hip.split()
       cors = int(elements[0])
       freq = eval(elements[1])
       for i in range(len(st)):
           st[i].data = highpass(st[i].data, freq, df=st[i].stats.sampling_rate,
                                 corners=cors, zerophase=args.zeroph)

    if lop != "0":
       elements = lop.split()
       cors = int(elements[0])
       freq = eval(elements[1])
       for i in range(len(st)):
           st[i].data = lowpass(st[i].data, freq, df=st[i].stats.sampling_rate,
                                corners=cors, zerophase=args.zeroph)

    if bdp != "0":
       elements = bdp.split()
       cors = int(elements[0])
       freq_min = eval(elements[1])
       freq_max = eval(elements[2])
       for i in range(len(st)):
           st[i].data = bandpass(st[i].data, freq_min, freq_max,
                                 df=st[i].stats.sampling_rate, corners=cors,
                                 zerophase=args.zeroph)

    return st
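The three filter options are parsed as "<corners> <frequency>" strings. A minimal, hypothetical invocation (the Namespace fields mirror the attributes the function reads; the file name and values are assumptions):

import argparse
from obspy import read

args = argparse.Namespace(highpass="0", lowpass="4 2.0",
                          bandpass="0", zeroph=True)
st = read("example.mseed")          # assumed input file
st = filtering(st, None, args)      # 4-corner 2.0 Hz lowpass on every trace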
Example #7
def plot_source_time_func(self, file1, file2, time_win):
    #adapted from mtspec manual (krischer, 2016)
    if self.wvtype == 'P':
        starttime1 = self.P_tt[self.mainev]
        starttime2 = self.P_tt[self.egfev]
    elif self.wvtype == 'S':
        starttime1 = self.S_tt[self.mainev]
        starttime2 = self.S_tt[self.egfev]
    st1 = read(file1)
    st2 = read(file2)
    delt = st1[0].stats.delta
    channel = st1[0].stats.channel
    chalist = [channel[0:2] + i for i in ('Z', 'N', 'E')]
    RSTF = {}
    for i in chalist:
        try:
            st1 = read(file1.replace(chalist[0], i),
                       starttime=starttime1,
                       endtime=starttime1 + time_win)
            st2 = read(file2.replace(chalist[0], i),
                       starttime=starttime2,
                       endtime=starttime2 + time_win)
            st_mtd = mt_deconvolve(st1[0].data,
                                   st2[0].data,
                                   delt,
                                   nfft=len(st1[0].data),
                                   time_bandwidth=4,
                                   number_of_tapers=7,
                                   weights='constant',
                                   demean=True)
        except OSError:
            # skip channels whose files are missing; otherwise st_mtd
            # below would be undefined
            continue
        st_dec = st_mtd['deconvolved']
        #        freq_dec = st_mtd['frequencies']
        xlen = len(st_dec)
        time = np.linspace(0, xlen * delt, xlen)
        l1 = np.arange(0, xlen)
        index1 = np.where((l1 >= 0) & (l1 <= xlen / 2))[0]
        sla = st_dec[index1]
        #        sla_freq = freq_dec[index1[:-1]]
        index2 = np.where((l1 > xlen / 2) & (l1 <= xlen + 1))[0]
        slb = st_dec[index2]
        #        slb_freq = freq_dec[index2[:-1]]
        slba = np.concatenate((sla, slb))
        #        slba_freq = np.concatenate((sla_freq, slb_freq))
        #        index3 = np.where(slba == max(slba))[0]
        #        freqmin = 2*slba_freq[index3[:-1]]
        slba = lowpass(slba,
                       5,
                       st1[0].stats.sampling_rate,
                       corners=4,
                       zerophase=True)
        slba /= slba.max()
        RSTF[i] = slba
    RSTF = sum(RSTF.values())
    RSTF /= RSTF.max()
    return time, RSTF
Example #8
 def set_sliprate_lp(self, dt, nsamp, freq, corners=4, zerophase=False):
     """
     :param dt: desired sampling
     :param nsamp: desired number of samples
     """
     self.sliprate = np.zeros(nsamp)
     self.sliprate[0] = 1.0 / dt
     self.sliprate = lowpass(self.sliprate, freq, 1./dt, corners, zerophase)
     self.dt = dt
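The method builds a discrete delta of unit area (1/dt at sample zero) and smooths it with the lowpass; a standalone sketch with assumed values shows that the Butterworth filter preserves that unit area (its DC gain is 1):

import numpy as np
from obspy.signal.filter import lowpass

dt, nsamp, freq = 0.1, 1000, 1.0    # assumed values
sliprate = np.zeros(nsamp)
sliprate[0] = 1.0 / dt              # discrete delta with unit area
sliprate = lowpass(sliprate, freq, 1. / dt, 4, False)
print(np.sum(sliprate) * dt)        # ~1.0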
Example #9
def stf_compute(st_main, st_egf, num_tapers):
    '''
    Computes the relative source time function. This section is an
    excerpt from the mtspec tutorial by Krischer.

    Inputs:
    st_main: Main event waveform
    st_egf: Auxiliary (eGF) event waveform
    num_tapers: Desired number of tapers to be applied during spectrum estimation

    Returns:
    x: Time indices of the relative source time function
    y: Amplitudes of the relative source time function

    Note: This function is still being tested, hence it is yet to be properly
    implemented.
    '''
    nfftlen = max(1000, st_main[0].stats.npts)
    delta = st_main[0].stats.delta
    half_nyq = np.arange(0, nfftlen)
    x = half_nyq * delta
    time_bandwidth = (num_tapers + 1) / 2
    y = []
    for tr_main, tr_egf in zip(st_main, st_egf):
        if len(tr_main.data) < len(tr_egf.data):
            lendif = len(tr_egf.data) - len(tr_main.data)
            tr_main.data = np.pad(tr_main.data, (0, lendif),
                                  'constant',
                                  constant_values=(0))
        elif len(tr_main.data) > len(tr_egf.data):
            lendif = len(tr_main.data) - len(tr_egf.data)
            tr_egf.data = np.pad(tr_egf.data, (0, lendif),
                                 'constant',
                                 constant_values=(0))
        pms = mt_deconvolve(tr_main.data,
                            tr_egf.data,
                            delta,
                            nfft=nfftlen,
                            time_bandwidth=time_bandwidth,
                            number_of_tapers=num_tapers,
                            weights='adaptive',
                            demean=True)

        stf_deconv = pms['deconvolved']
        first_half = stf_deconv[np.where((half_nyq >= 0)
                                         & (half_nyq <= nfftlen / 2))[0]]
        second_half = stf_deconv[np.where((half_nyq > nfftlen / 2)
                                          & (half_nyq <= nfftlen + 1))[0]]
        yx = np.concatenate((second_half, first_half))
        yx = lowpass(yx, 4, 1. / delta, corners=4, zerophase=True)
        #        yx = [i+abs(min(yx)) for i in yx]
        yx = [i / max(yx) for i in yx]
        y.append(yx)
    y = np.median(y, axis=0)
    return x, y
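A hypothetical call (file names are placeholders) feeding two equally pre-processed event streams through the routine:

from obspy import read

st_main = read("main_event.mseed")   # placeholder file names
st_egf = read("egf_event.mseed")
x, y = stf_compute(st_main, st_egf, num_tapers=7)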
Example #10
def main():
    strike = 90.
    dip = 90.
    rake = 0.
    rupture_velo = 0.9
    rupture_len = 1000
    npoints = 1000
    slip = 10.
    dep = 50
    vs = 5.

    dt = 5.
    nts = 100

    lonstart = 0.

    area = rupture_len * dep * 2 / npoints * 1e10  # in cm**2

    equator_len = 2 * np.pi * 6371

    lat = np.zeros(npoints)
    lon = np.linspace(lonstart, lonstart + rupture_len / equator_len * 360.,
                      npoints)
    tinit = np.linspace(0., rupture_len, npoints) / (rupture_velo * vs)

    stf = np.zeros(nts)
    stf[1] = 1./dt
    stf = lowpass(stf, 1./100., 1./dt)

    plt.plot(stf)
    plt.show()

    f = open('strike_slip_eq.srf', 'w')
    f.write('POINTS %d\n' % (npoints,))

    for i in np.arange(npoints):
        # lon, lat, dep, stk, dip, area, tinit, dt
        f.write('%11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f\n' %
                (lon[i], lat[i], dep, strike, dip, area, tinit[i], dt))

        # rake, slip1, nt1, slip2, nt2, slip3, nt3
        f.write('%11.5f %11.5f %5d %11.5f %5d %11.5f %5d\n' %
                (rake, slip, nts, 0., 0, 0., 0))

        # f.write('%11.5f %11.5f %11.5f\n' % (0., 1., 0.))
        count = 0
        for j in np.arange(nts):
            f.write('%11.5f ' % (stf[j],))
            count += 1
            if count % 10 == 0:
                f.write('\n')
                count = 0

    f.close()
Example #11
def main():
    strike = 90.0
    dip = 90.0
    rake = 0.0
    rupture_velo = 0.9
    rupture_len = 1000
    npoints = 1000
    slip = 10.0
    dep = 50
    vs = 5.0

    dt = 5.0
    nts = 100

    lonstart = 0.0

    area = rupture_len * dep * 2 / npoints * 1e10  # in cm**2

    equator_len = 2 * np.pi * 6371

    lat = np.zeros(npoints)
    lon = np.linspace(lonstart, lonstart + rupture_len / equator_len * 360.0, npoints)
    tinit = np.linspace(0.0, rupture_len, npoints) / (rupture_velo * vs)

    stf = np.zeros(nts)
    stf[1] = 1.0 / dt
    stf = lowpass(stf, 1.0 / 100.0, 1.0 / dt)

    plt.plot(stf)
    plt.show()

    f = open("strike_slip_eq.srf", "w")
    f.write("POINTS %d\n" % (npoints,))

    for i in np.arange(npoints):
        # lon, lat, dep, stk, dip, area, tinit, dt
        f.write(
            "%11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f\n"
            % (lon[i], lat[i], dep, strike, dip, area, tinit[i], dt)
        )

        # rake, slip1, nt1, slip2, nt2, slip3, nt3
        f.write("%11.5f %11.5f %5d %11.5f %5d %11.5f %5d\n" % (rake, slip, nts, 0.0, 0, 0.0, 0))

        # f.write('%11.5f %11.5f %11.5f\n' % (0., 1., 0.))
        count = 0
        for j in np.arange(nts):
            f.write("%11.5f " % (stf[j],))
            count += 1
            if count % 10 == 0:
                f.write("\n")
                count = 0

    f.close()
Example #12
    def update(self, update_number, data):
        if self.trace_field is None:
            for device in data.dtype.names:
                if device.endswith(self._config['field_ending']):
                    field = device
                    break
            else:
                err = ('field ending in {} '.format(
                    self._config['field_ending']) +
                       'not found - cannot perform postprocessing')
                raise RuntimeError(err)
        else:
            # reuse the previously found field so ``field`` is always defined
            field = self.trace_field
        # copy the data out
        data_to_process = data[field][0].copy()
        # GUI option to either keep original traces or delete them
        if self._config['remove_trace_data']:
            other_data = rfn.drop_fields(data, field, usemask=False)
        else:
            other_data = data
        # perform post-processing
        processed_data, times = self._post_processing(data_to_process)
        # plot data
        if self._config['plot']:
            plot_data = lowpass(processed_data[FIELD],
                                self._config['lowpass_cutoff'],
                                self.sampling_rate,
                                corners=4,
                                zerophase=True)
            plt.figure(self.__class__.__name__)
            # current plot
            plt.subplot(211)
            plt.cla()
            plt.plot(times, plot_data)
            plt.xlabel(r'Time [microseconds]')
            plt.ylabel(r'Velocity[m/s]')
            plt.pause(0.05)
            # wiggle plot
            plt.subplot(212)
            axes = plt.gca()
            data = plot_data / (2 * max(plot_data)) + update_number
            axes.plot(data, times, color='black', linewidth=0.5)
            plt.xlim((-1, self.updates))
            plt.xlabel('Update Number')
            plt.ylabel(r'Time [microseconds]')
            plt.pause(0.05)

        # insert and return the new data
        return rfn.merge_arrays([other_data, processed_data],
                                flatten=True,
                                usemask=False)
Example #13
    def cf_lowpass(self, tapper=True, parallel=True, freq=0.15):
        """
        Characteristic function with lowpass.
        Compute the mean values of the log10 differences of the convolved waveform with the wavelet from fmin to fmax
        with a low pass filter.
        :param tapper: True for tapper the result. Default=True.
        :param parallel: Either or not it should run cwt in parallel. Default=True.
        :param freq: Filter corner frequency. Default=0.15.
        :return: The filtered (lowpass, fmin=0.15) mean values of the log10 difference of the convolved waveform with
            the wavelet from fmin to fmax.
        """

        cf = lowpass(self.cf(tapper, parallel=parallel), freq, df=self._sample_rate, corners=3, zerophase=True)

        return cf
Example #14
def test_finite_source():
    """
    incremental tests of bwd mode with source force
    """
    from obspy.signal.filter import lowpass
    instaseis_bwd = InstaSeisDB(os.path.join(DATA, "100s_db_bwd_displ_only"))

    receiver = Receiver(latitude=42.6390, longitude=74.4940)

    source = Source(
        latitude=89.91, longitude=0.0, depth_in_m=12000,
        m_rr=4.710000e+24 / 1E7,
        m_tt=3.810000e+22 / 1E7,
        m_pp=-4.740000e+24 / 1E7,
        m_rt=3.990000e+23 / 1E7,
        m_rp=-8.050000e+23 / 1E7,
        m_tp=-1.230000e+24 / 1E7)

    dt = instaseis_bwd.dt
    sliprate = np.zeros(1000)
    sliprate[0] = 1.
    sliprate = lowpass(sliprate, 1./100., 1./dt, corners=4)

    source.set_sliprate(sliprate, dt, time_shift=0., normalize=True)

    st_fin = instaseis_bwd.get_seismograms_finite_source(
        sources=[source], receiver=receiver,
        components=('Z', 'N', 'E', 'R', 'T'), dt=0.1)
    st_ref = instaseis_bwd.get_seismograms(
        source=source, receiver=receiver,
        components=('Z', 'N', 'E', 'R', 'T'), dt=0.1, reconvolve_stf=True)

    for comp in ('Z', 'N', 'E', 'R', 'T'):
        np.testing.assert_allclose(st_fin.select(component=comp)[0].data,
                                   st_ref.select(component=comp)[0].data,
                                   rtol=1E-7, atol=1E-12)
Example #15
    def update(self, update_number, data):
        if self.trace_field is None:
            for device in data.dtype.names:
                if device.endswith(self._config['field_ending']):
                    field = device
                    break
            else:
                err = ('field ending in {} '.format(self._config['field_ending']) +
                       'not found - cannot perform postprocessing')
                raise RuntimeError(err)
        else:
            # reuse the previously found field so ``field`` is always defined
            field = self.trace_field
        # copy the data out
        data_to_process = data[field][0].copy()
        # GUI option to either keep original traces or delete them
        if self._config['remove_trace_data']:
            other_data = rfn.drop_fields(data, field, usemask=False)
        else:
            other_data = data
        # perform post-processing
        processed_data, times = self._post_processing(data_to_process)
        # plot data
        if self._config['plot']:
            plot_data = lowpass(processed_data[FIELD],
                                self._config['lowpass_cutoff'],
                                self.sampling_rate,
                                corners=4,
                                zerophase=True)
            plt.figure(self.__class__.__name__)
            # current plot
            plt.subplot(211)
            plt.cla()
            plt.plot(times, plot_data)
            plt.xlabel(r'Time [microseconds]')
            plt.ylabel(r'Velocity[m/s]')
            plt.pause(0.05)
            # wiggle plot
            plt.subplot(212)
            axes = plt.gca()
            data = plot_data / (2*max(plot_data)) + update_number
            axes.plot(data, times, color='black', linewidth=0.5)
            plt.xlim((-1, self.updates))
            plt.xlabel('Update Number')
            plt.ylabel(r'Time [microseconds]')
            plt.pause(0.05)

        # insert and return the new data
        return rfn.merge_arrays([other_data, processed_data], flatten=True, usemask=False)
Example #16
 def _filter_traces(self):
     if self.lpcut.get() > self.sampling_rate.get():
         tkmessage.showerror(
             "Error",
             "Lowpass cutoff frequency greater than sampling rate.")
     elif self.hpcut.get() > self.sampling_rate.get():
         tkmessage.showerror(
             "Error",
             "Highpass cutoff frequency greater than sampling rate.")
     else:
         for k, tr in enumerate(self._traces):
             filtered = tr
             if self.lowpass.get():
                 filtered = lowpass(filtered, self.lpcut.get(),
                                    self.sampling_rate.get())
             if self.highpass.get():
                 filtered = highpass(filtered, self.hpcut.get(),
                                     self.sampling_rate.get())
             # assign once so both filters chain rather than the highpass
             # overwriting the lowpassed result
             self._traces[k, :] = filtered
Example #17
    def lowpass(self, freq, corners=4, zerophase=False, traces=None):
        """
        Butterworth-Lowpass Filter of the data.

        Filter data removing data over certain frequency ``freq`` using ``corners``
        corners.

        :param freq: Filter corner frequency in Hz.
        :param corners: Filter corners. Note: This is twice the value of PITSA's
            filter sections.
        :param zerophase: If True, apply filter once forwards and once backwards.
            This results in twice the number of corners but zero phase shift in
            the resulting filtered trace.
        :param traces: List of ``SEGYTrace`` objects with data to operate on.
            Default is to operate on all traces.
        """
        if not traces:
            traces = self.traces
        for tr in traces:
            df = 1.0 / (tr.header.sample_interval_in_ms_for_this_trace / 1.0e6)
            tr.data = filter.lowpass(tr.data, freq, df, corners=corners, zerophase=zerophase)
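Despite the "_in_ms" field name, SEG-Y stores the sample interval in microseconds, hence the division by 1.0e6 above; for an assumed 2000-microsecond (2 ms) interval:

sample_interval = 2000                  # microseconds, assumed value
df = 1.0 / (sample_interval / 1.0e6)    # 500.0 Hz sampling rate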
Example #18
    def _post_processing(self, data_to_process):
        #wavelength = 1550.0e-9
        ## Read the channels from the data
        channel1 = np.array(data_to_process[0]).astype(TYPE)
        channel2 = np.array(data_to_process[1]).astype(TYPE)
        #channel3 = np.array(data_to_process[2]).astypye(float)
        times = np.arange(0, len(channel1[0])) * (1 / self.sampling_rate)

        ##call vfm for processing the data on each record
        y_shift = self._config['y_shift']
        processed = np.array([_vfm(channel1[i] + y_shift,
                                   channel2[i] + y_shift,
                                   times) for i in range(len(channel1))])
        ##average and lowpass the processed data
        processed_avg = lowpass(processed.mean(axis=0),
                                self.lowpass_cutoff,
                                self.sampling_rate,
                                corners=4,
                                zerophase=True)
        # make a numpy array for our data
        new_data = np.array((1,), dtype=[(FIELD, TYPE, len(processed_avg)+1)])
        ## copy the processed data to the numpy array
        new_data[FIELD] = np.append(processed_avg, processed_avg[-1])
        return new_data, times
pl.close("all")
#Read stations
stations=np.genfromtxt(stafile,dtype="S6",usecols=0)
x=np.genfromtxt(stafile,dtype="f8",usecols=1)
y=np.genfromtxt(stafile,dtype="f8",usecols=2)
efkcoseis=np.zeros(stations.shape)
nfkcoseis=np.zeros(stations.shape)
ufkcoseis=np.zeros(stations.shape)
#Loop, read data and plot
for j in range(len(stations)):
    k=j
    sta=stations[k]
    print(j)
    #Read validation 1
    e_v1=lowpass(np.genfromtxt(val1+sta.lower()+'.syn',dtype="f8",usecols=0,skip_header=4),freq,df=1/dt1,zerophase=True)
    n_v1=lowpass(np.genfromtxt(val1+sta.lower()+'.syn',dtype="f8",usecols=1,skip_header=4),freq,df=1/dt1,zerophase=True)
    u_v1=lowpass(np.genfromtxt(val1+sta.lower()+'.syn',dtype="f8",usecols=2,skip_header=4),freq,df=1/dt1,zerophase=True)
    t_v1=np.arange(0,len(e_v1)*dt1,dt1)#-0.8
    #Read validation 2
    e_v2=lowpass(np.genfromtxt(val2+sta.lower()+'.syn',dtype="f8",usecols=0,skip_header=4),freq,df=1/dt2,zerophase=True)
    n_v2=lowpass(np.genfromtxt(val2+sta.lower()+'.syn',dtype="f8",usecols=1,skip_header=4),freq,df=1/dt2,zerophase=True)
    u_v2=lowpass(np.genfromtxt(val2+sta.lower()+'.syn',dtype="f8",usecols=2,skip_header=4),freq,df=1/dt2,zerophase=True)
    t_v2=np.arange(0,len(e_v2)*dt2,dt2)
    #Read validation 3
    e_v3=lowpass(np.genfromtxt(val3+sta.lower()+'.syn',dtype="f8",usecols=0,skip_header=4),freq,df=1/dt3,zerophase=True)
    n_v3=lowpass(np.genfromtxt(val3+sta.lower()+'.syn',dtype="f8",usecols=1,skip_header=4),freq,df=1/dt3,zerophase=True)
    u_v3=lowpass(np.genfromtxt(val3+sta.lower()+'.syn',dtype="f8",usecols=2,skip_header=4),freq,df=1/dt3,zerophase=True)
    t_v3=np.arange(0,len(e_v3)*dt3,dt3)
    #Read validation 4
    e_v4=lowpass(np.genfromtxt(val4+sta.lower()+'.syn',dtype="f8",usecols=0,skip_header=4),freq,df=1/dt4,zerophase=True)
Example #20
def decon_test(PSS_file, phase, method):
    """
    Function to test a given deconvolution method with synthetic data created
    with raysum.

    Parameters
    ----------
    PSS_file : str
        Filename of raysum file containing P-Sv-Sh traces.
    phase : str
        "S" or "P".
    method : str
        Deconvolution method: use 1. "fqd", "wat", or "con for
        frequency-dependent damped, waterlevel damped, constantly damped
        spectraldivision. 2. "it" for iterative deconvolution, and 3.
        "multit_con" or "multitap_fqd" for constantly or frequency-dependent
        damped multitaper deconvolution.

    Returns
    -------
    RF : np.array
        Matrix containing all receiver functions.
    dt : float
        Sampling interval.

    """
    PSS, dt, M, N, shift = read_raysum(phase, PSS_file=PSS_file)

    # Create receiver functions
    RF = []
    for i in range(M):
        if phase == "P":
            u = PSS[i, 0, :]
            v = PSS[i, 1, :]
        elif phase == "S":
            u = PSS[i, 1, :]
            v = PSS[i, 0, :]
        if method == "it":
            data, _, _ = it(u, v, dt, shift=shift)
            lrf = None
        # elif method == "gen_it":
        #     data, IR, iters, rej = gen_it(u, v, dt, phase=phase, shift=shift)
        #     lrf = None
        elif method == "fqd" or method == "wat" or method == "con":
            data, lrf = spectraldivision(v,
                                         u,
                                         dt,
                                         shift,
                                         phase=phase,
                                         regul=method,
                                         test=True)
        elif method == "multit_fqd":
            data, lrf, _, _ = multitaper(u, v, dt, shift, 'fqd')
            data = lowpass(data, 4.99, 1 / dt, zerophase=True)
        elif method == "multit_con":
            data, lrf, _, data2 = multitaper(u, v, dt, shift, 'con')
            data = lowpass(data, 4.99, 1 / dt, zerophase=True)
        else:
            raise NameError('Unknown deconvolution method: %s' % method)
        # if lrf is not None:
        #     # Normalisation for spectral division and multitaper
        #     # In order to do that, we have to find the factor that is
        #       necessary to
        #     # bring the zero-time pulse to 1
        #     fact = abs(lrf).max() #[round(shift/dt)]
        #     data = data/fact
        RF.append(RFTrace(data))
        RF[-1].stats.delta = dt
        RF[-1].stats.starttime = UTCDateTime(0)
        RF[-1].stats.onset = UTCDateTime(0) + shift
        RF[-1].stats.type = 'time'
        RF[-1].stats.phase = phase
        RF[-1].stats.channel = phase + 'RF'
        RF[-1].stats.network = 'RS'
    RF = RFStream(RF)
    return RF, dt
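A hypothetical run (the raysum file name is a placeholder) comparing two of the supported deconvolution methods:

RF_it, dt = decon_test("sample.raysum", "P", "it")          # iterative
RF_mt, dt = decon_test("sample.raysum", "P", "multit_con")  # multitaper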
Example #21
def moveout(data: np.ndarray,
            st: obspy.core.trace.Stats,
            fname: str,
            latb: tuple,
            lonb: tuple,
            taper: bool,
            multiple: bool = False):
    """
    Depth migration for RF.
    Corrects the for the moveout of a converted phase. Flips time axis
    and polarity of SRF.

    Parameters
    ----------
    data : np.ndarray
        Receiver Function.
    st : obspy.core.trace.Stats
        Stream stats
    fname : str
        1D velocity model for moveout correction.
        Use '3D' for a 3D raytracing.
    latb : tuple
         Tuple in Form (minlat, maxlat). To save RAM on 3D raytraycing.
        Will remain unused for 1D RT.
    lonb : tuple
        Tuple in Form (minlon, maxlon)
    taper : bool
        If True, the last 10km of the RF will be tapered,
        which avoids jumps in station stacks. If False, the upper 5 km will be
        tapered. Should be False for CCP stacks.
    multiple : bool, optional
        Either False (don't include multiples), 'linear' for
        linear stack, 'PWS' (phase weighted stack or "zk" for a zhu &
        kanamori approach). False by default., by default False

    Returns
    -------
    z2 : Arraylike
        Depth vector in km.
    RF2 : Arraylike
        Vector containing the depth migrated RF.
    delta : Arraylike
        Vector containing Euclidian distance of piercing point from the
        station at depth z.

    """

    onset = st.onset
    tas = round(
        (onset - st.starttime) / st.delta)  # theoretical arrival sample
    rayp = st.slowness  # Ray parameter
    phase = st.phase  # Primary phase
    el = st.station_elevation

    phase = phase[-1]

    if fname[-2:] == '3D':
        test = fname == 'raysum3D'
        if test:
            test = int(st.station)  # The dip of the LABx
        htab, dt, delta, dtm1, dtm2 = dt_table_3D(rayp,
                                                  phase,
                                                  st.station_latitude,
                                                  st.station_longitude,
                                                  st.back_azimuth,
                                                  el,
                                                  latb,
                                                  lonb,
                                                  multiple,
                                                  test=test)
        # Multiple modes

    else:
        # dtm1 is PPS for PRFs (SSP SRFs), dtm2 PSS (SPP SRFs)
        htab, dt, delta, dtm1, dtm2 = dt_table(rayp, fname, phase, el,
                                               multiple)

    # queried times
    tq = np.arange(0, round(max(dt) + st.delta, 1), st.delta)
    z = np.interp(tq, dt, htab)  # Depth array for first RF (not evenly spaced)

    # Flip SRF
    if phase.upper() == "S":
        data = np.flip(data)
        data = -data
        tas = -tas

    # Shorten RF
    data = data[tas:]

    # Taper the first 2.5 seconds
    i = round(2.5 / st.delta)  # Find where rf is depth = 5
    tap = hann((i + 1) * 2)
    up, _ = np.split(tap, 2)
    if len(data) > len(up):  # That usually doesn't happen, only for extreme
        # discontinuities in 3D model and errors in SRF data
        taperfun = np.ones(len(data))
        taperfun[:len(up)] = up
        data = np.multiply(taperfun, data)

    if len(z) <= len(data):
        RF = data[:len(z)]
    else:  # truncate z not RF
        # 16.07.2020 this shouldn't be happening, but I'm experiencing an error
        # that might be prevented here
        z = z[:len(data)]
        RF = data[:len(z)]

    if round(min(z), int(-np.log10(res))) <= 0:
        zq = np.hstack((np.arange(min(z), 0,
                                  .1), np.arange(0,
                                                 max(z) + res, res)))
    else:
        zq = np.arange(round(min(z), int(-np.log10(res))), max(z) + res)

    if multiple:
        # lowpass filter, see Tauzin et al. (2016)
        RF = lowpass(RF, 1, st.sampling_rate, zerophase=True)
    # interpolate RF

    try:
        tck = interpolate.splrep(z, RF)

    except TypeError:
        # Happens almost never, the RF is empty?
        # No data or some bug in the data
        # correction and RF is too short, just return everything 0
        mes = "The length of the Receiver Function is" + str(len(z)) + "and\
        therefore too short, setting = 0."

        warnings.warn(mes, category=UserWarning, stacklevel=1)
        z2 = np.hstack((np.arange(-10, 0, .1), np.arange(0, maxz + res, res)))
        RF2 = np.zeros(z2.shape)
        delta2 = np.empty(z2.shape)
        delta2.fill(np.nan)
        return z2, RF2, delta2

    RF = interpolate.splev(zq, tck)

    # for the multiple modes
    if multiple:
        # Multiples are only useful for the upper part of the lithosphere
        # I will go with the upper ~constants.maxzm km for now
        if htab[len(dtm1) - 1] > maxzm:
            dtm1 = dtm1[:np.where(htab >= maxzm)[0][0]]
        if htab[len(dtm2) - 1] > maxzm:
            dtm2 = dtm2[:np.where(htab >= maxzm)[0][0]]
        tqm1 = np.arange(0, round(max(dtm1) + st.delta, 1), st.delta)
        tqm2 = np.arange(0, round(max(dtm2) + st.delta, 1), st.delta)
        zm1 = np.interp(tqm1, dtm1, htab[:len(dtm1)])
        zm2 = np.interp(tqm2, dtm2, htab[:len(dtm2)])
        # truncate RF
        RFm1 = data[:len(zm1)]
        RFm2 = data[:len(zm2)]

        # lowpass filter, see Tauzin et al. (2016)
        RFm1 = lowpass(RFm1, .25, st.sampling_rate, zerophase=True)
        RFm2 = lowpass(RFm2, .25, st.sampling_rate, zerophase=True)
        try:
            tckm1 = interpolate.splrep(zm1, RFm1)
            tckm2 = interpolate.splrep(zm2, RFm2)
        except TypeError:
            multiple = False
            mes = "Interpolation error in multiples. Only primary conversion\
                 will be used."

            warnings.warn(mes, category=UserWarning, stacklevel=1)
            pass

    if multiple:
        # query depths
        if round(min(zm1), int(-np.log10(res))) <= 0:
            zqm1 = np.hstack((np.arange(min(zm1), 0,
                                        .1), np.arange(0,
                                                       max(zm1) + res, res)))
            zqm2 = np.hstack((np.arange(min(zm2), 0,
                                        .1), np.arange(0,
                                                       max(zm2) + res, res)))
        else:
            zqm1 = np.arange(round(min(zm1), int(-np.log10(res))),
                             max(zm1) + res)
            zqm2 = np.arange(round(min(zm2), int(-np.log10(res))),
                             max(zm2) + res)

        RFm1 = interpolate.splev(zqm1, tckm1)
        # negative due to polarity change
        RFm2 = -1 * interpolate.splev(zqm2, tckm2)

    if taper:
        # Taper the last 10 km
        tap = hann(20)
        _, down = np.split(tap, 2)

        if len(RF) > len(down):  # That usually doesn't happen,
            # only for extreme
            # discontinuities in 3D model and errors in SRF data
            taper = np.ones(len(RF))
            taper[-len(down):] = down
            RF = np.multiply(taper, RF)

    z2 = np.hstack((np.arange(-10, 0, .1), np.arange(0, maxz + res, res)))
    RF2 = np.zeros(z2.shape)

    # np.where does not seem to work here
    starti = np.nonzero(np.isclose(z2, htab[0]))[0][0]
    if len(RF) + starti > len(RF2):
        # truncate
        # Honestly do not know why that would happen, but it does once in a
        # million times, perhaps due to rounding + interpolation.
        mes = "The interpolated RF is too long, truncating."
        warnings.warn(mes, category=UserWarning, stacklevel=1)
        diff = len(RF) + starti - len(RF2)
        RF = RF[:-diff]
    RF2[starti:starti + len(RF)] = RF
    if multiple:
        RFm1_2 = np.zeros(RF2.shape)
        RFm2_2 = np.zeros(RF2.shape)
        RFm1_2[starti:starti + len(RFm1)] = RFm1
        RFm2_2[starti:starti + len(RFm2)] = RFm2

    else:
        RFm1_2 = None
        RFm2_2 = None

    # reshape delta - else that will mess with the CCP stacking
    delta2 = np.empty(z2.shape)
    delta2.fill(np.nan)
    delta2[starti:starti + len(delta)] = delta

    return z2, RF2, delta2, RFm1_2, RFm2_2
Example #22
def process(tr, lowcut, highcut, filt_order, samp_rate,
            starttime=False, clip=False, length=86400,
            seisan_chan_names=False, ignore_length=False, fill_gaps=True,
            ignore_bad_data=False, fft_threads=1):
    """
    Basic function to process data, usually called by dayproc or shortproc.

    Functionally, this will bandpass, downsample and check headers and length
    of trace to ensure files start when they should and are the correct length.
    This is a simple wrapper on obspy functions; we include it here to provide
    a system to ensure all parts of the dataset are processed in the same way.

    .. note:: Usually this function is called via dayproc or shortproc.

    :type tr: obspy.core.trace.Trace
    :param tr: Trace to process
    :type lowcut: float
    :param lowcut:
        Low cut in Hz, if set to None and highcut is set, will use
        a lowpass filter.
    :type highcut: float
    :param highcut:
        High cut in Hz, if set to None and lowcut is set, will use
        a highpass filter.
    :type filt_order: int
    :param filt_order: Number of corners for filter.
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz.
    :type starttime: obspy.core.utcdatetime.UTCDateTime
    :param starttime: Desired start of trace
    :type clip: bool
    :param clip: Whether to expect, and enforce a set length of data or not.
    :type length: float
    :param length: Use to set a fixed length for data from the given starttime.
    :type seisan_chan_names: bool
    :param seisan_chan_names:
        Whether channels are named like seisan channels (which are two letters
        rather than SEED convention of three) - defaults to False.
    :type ignore_length: bool
    :param ignore_length: See warning in dayproc.
    :type fill_gaps: bool
    :param fill_gaps: Whether to pad any gaps found with zeros or not.
    :type ignore_bad_data: bool
    :param ignore_bad_data:
        If False (default), errors will be raised if data are excessively
        gappy or are mostly zeros. If True then no error will be raised, but
        an empty trace will be returned.
    :type fft_threads: int
    :param fft_threads: Number of threads to use for pyFFTW FFT in resampling

    :return: Processed trace.
    :type: :class:`obspy.core.stream.Trace`

    .. note::
        If your data contain gaps you should *NOT* fill those gaps before
        using the pre-process functions. The pre-process functions will fill
        the gaps internally prior to processing, process the data, then re-fill
        the gaps with zeros to ensure correlations are not incorrectly
        calculated within gaps. If your data have gaps you should pass a merged
        stream without the `fill_value` argument (e.g.: `tr = tr.merge()`).
    """
    # Add sanity check
    if highcut and highcut >= 0.5 * samp_rate:
        raise IOError('Highcut must be lower than the Nyquist')

    # Define the start-time
    if starttime:
        # Be nice and allow a datetime object.
        if isinstance(starttime, dt.date) or isinstance(starttime,
                                                        dt.datetime):
            starttime = UTCDateTime(starttime)

    Logger.debug('Working on: {0}'.format(tr.id))
    # Check if the trace is gappy and pad if it is.
    gappy = False
    if isinstance(tr.data, np.ma.MaskedArray):
        gappy = True
        gaps, tr = _fill_gaps(tr)
    # Do a brute force quality check
    qual = _check_daylong(tr)
    if not qual:
        msg = ("Data have more zeros than actual data, please check the raw",
               " data set-up and manually sort it: " + tr.stats.station + "." +
               tr.stats.channel)
        if not ignore_bad_data:
            raise ValueError(msg)
        else:
            Logger.warning(msg)
            return Trace(data=np.array([]), header={
                "station": tr.stats.station, "channel": tr.stats.channel,
                "network": tr.stats.network, "location": tr.stats.location,
                "starttime": tr.stats.starttime,
                "sampling_rate": tr.stats.sampling_rate})
    tr = tr.detrend('simple')
    # Detrend data before filtering
    Logger.debug('I have {0} data points for {1} before processing'.format(
        tr.stats.npts, tr.id))

    # Sanity check to ensure files are daylong
    padded = False
    if clip:
        tr = tr.trim(starttime, starttime + length, nearest_sample=True)
    if float(tr.stats.npts / tr.stats.sampling_rate) != length and clip:
        Logger.info(
            'Data for {0} are not long-enough, will zero pad'.format(
                tr.id))
        if tr.stats.endtime - tr.stats.starttime < 0.8 * length\
           and not ignore_length:
            msg = (
                "Data for {0}.{1} is {2:.2f} seconds long, which is less than "
                "80 percent of the desired length ({3} seconds), will not "
                "pad".format(
                    tr.stats.station, tr.stats.channel,
                    tr.stats.endtime - tr.stats.starttime, length))
            if not ignore_bad_data:
                raise NotImplementedError(msg)
            else:
                Logger.warning(msg)
                return Trace(data=np.array([]), header={
                    "station": tr.stats.station, "channel": tr.stats.channel,
                    "network": tr.stats.network, "location": tr.stats.location,
                    "starttime": tr.stats.starttime,
                    "sampling_rate": tr.stats.sampling_rate})
        # trim, then calculate length of any pads required
        pre_pad_secs = tr.stats.starttime - starttime
        post_pad_secs = (starttime + length) - tr.stats.endtime
        if pre_pad_secs > 0 or post_pad_secs > 0:
            padded = True
            pre_pad = np.zeros(int(pre_pad_secs * tr.stats.sampling_rate))
            post_pad = np.zeros(int(post_pad_secs * tr.stats.sampling_rate))
            Logger.debug(str(tr))
            Logger.debug("Padding to day long with {0} s before and {1} s "
                         "at end".format(pre_pad_secs, post_pad_secs))
            tr.data = np.concatenate([pre_pad, tr.data, post_pad])
            # Use this rather than the expected pad because of rounding samples
            tr.stats.starttime -= len(pre_pad) * tr.stats.delta
            Logger.debug(str(tr))
        # If there is one sample too many after this remove the first one
        # by convention
        if len(tr.data) == (length * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if tr.stats.sampling_rate * length != tr.stats.npts:
            raise ValueError('Data are not long enough for ' +
                             tr.stats.id)
        Logger.debug(
            'I now have {0} data points after enforcing length'.format(
                tr.stats.npts))
    # Check sampling rate and resample
    if tr.stats.sampling_rate != samp_rate:
        Logger.debug('Resampling')
        tr = _resample(tr, samp_rate, threads=fft_threads)
    # Filtering section
    tr = tr.detrend('simple')    # Detrend data again before filtering
    if highcut and lowcut:
        Logger.debug('Bandpassing')
        tr.data = bandpass(tr.data, lowcut, highcut,
                           tr.stats.sampling_rate, filt_order, True)
    elif highcut:
        Logger.debug('Lowpassing')
        tr.data = lowpass(tr.data, highcut, tr.stats.sampling_rate,
                          filt_order, True)
    elif lowcut:
        Logger.debug('Highpassing')
        tr.data = highpass(tr.data, lowcut, tr.stats.sampling_rate,
                           filt_order, True)
    else:
        Logger.warning('No filters applied')
    # Account for two letter channel names in s-files and therefore templates
    if seisan_chan_names:
        tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]

    if padded:
        Logger.debug("Reapplying zero pads post processing")
        Logger.debug(str(tr))
        pre_pad = np.zeros(int(pre_pad_secs * tr.stats.sampling_rate))
        post_pad = np.zeros(int(post_pad_secs * tr.stats.sampling_rate))
        pre_pad_len = len(pre_pad)
        post_pad_len = len(post_pad)
        Logger.debug(
            "Taking only valid data between {0} and {1} samples".format(
                pre_pad_len, tr.stats.npts - post_pad_len))
        # Re-apply the pads, taking only the data section that was valid
        tr.data = np.concatenate(
            [pre_pad, tr.data[pre_pad_len: len(tr.data) - post_pad_len],
             post_pad])
        Logger.debug(str(tr))
    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != length and clip:
        Logger.info(
            'Data for {0} are not of daylong length, will zero pad'.format(
                tr.id))
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime, starttime + length, pad=True, fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this remove the last one
        # by convention
        if len(tr.data) == (length * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * length == tr.stats.npts:
            raise ValueError('Data are not daylong for ' +
                             tr.stats.station + '.' + tr.stats.channel)
    # Replace the gaps with zeros
    if gappy:
        tr = _zero_pad_gaps(tr, gaps, fill_gaps=fill_gaps)
    return tr
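As a hedged usage sketch (the file name and cutoffs are assumptions, not values from the source), bandpassing a single trace between 2 and 9 Hz and resampling it to 20 Hz:

from obspy import read

tr = read("example.mseed")[0]       # assumed input file
tr = process(tr, lowcut=2.0, highcut=9.0, filt_order=4,
             samp_rate=20.0)        # highcut stays below the 10 Hz Nyquist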
Example #23
def make_stf(dt=0.10,
             nt=5000,
             fmin=1.0 / 100.0,
             fmax=1.0 / 8.0,
             filename='../INPUT/stf_new',
             plot=True):
    """
	Generate a source time function for ses3d by applying a bandpass filter to a Heaviside function.

	make_stf(dt=0.13, nt=4000, fmin=1.0/100.0, fmax=1.0/8.0, filename='../INPUT/stf_new', plot=True)

	dt: Length of the time step. Must equal dt in the event_* file.
	nt: Number of time steps. Must equal to or greater than nt in the event_* file.
	fmin: Minimum frequency of the bandpass.
	fmax: Maximum frequency of the bandpass.
	filename: Output filename.

	"""

    #- Make time axis and original Heaviside function. --------------------------------------------

    t = np.arange(0.0, float(nt + 1) * dt, dt)
    h = np.ones(len(t))

    #- Apply filters. -----------------------------------------------------------------------------

    h = flt.highpass(h, fmin, 1.0 / dt, 3, zerophase=False)
    h = flt.lowpass(h, fmax, 1.0 / dt, 5, zerophase=False)

    #- Plot output. -------------------------------------------------------------------------------

    if plot:

        #- Time domain.

        plt.plot(t, h, 'k')
        plt.xlim(0.0, float(nt) * dt)
        plt.xlabel('time [s]')
        plt.title('source time function (time domain)')

        plt.show()

        #- Frequency domain.

        hf = np.fft.fft(h)
        f = np.fft.fftfreq(len(hf), dt)

        plt.semilogx(f, np.abs(hf), 'k')
        plt.plot([fmin, fmin], [0.0, np.max(np.abs(hf))], 'r--')
        plt.text(1.1 * fmin, 0.5 * np.max(np.abs(hf)), 'fmin')
        plt.plot([fmax, fmax], [0.0, np.max(np.abs(hf))], 'r--')
        plt.text(1.1 * fmax, 0.5 * np.max(np.abs(hf)), 'fmax')
        plt.xlim(0.1 * fmin, 10.0 * fmax)
        plt.xlabel('frequency [Hz]')
        plt.title('source time function (frequency domain)')

        plt.show()

    #- Write to file. -----------------------------------------------------------------------------

    f = open(filename, 'w')

    #- Header.

    f.write('source time function, ses3d version 4.1\n')
    f.write('nt= ' + str(nt) + ', dt=' + str(dt) + '\n')
    f.write('filtered Heaviside, highpass(fmin=' + str(fmin) +
            ', corners=3, zerophase=False), lowpass(fmax=' + str(fmax) +
            ', corners=5, zerophase=False)\n')
    f.write('-- samples --\n')

    for k in range(len(h)):
        f.write(str(h[k]) + '\n')

    f.close()
Example #24
def resampleFilterAndCutTraces(stream, resampling_rate, lowpass_value,
                               highpass_value, zerophase, corners, starttime,
                               endtime, message_function=None):
    """
    Resamples, filters and cuts all Traces in a Stream object.

    It will always apply each operation to every trace in the order described
    above.

    :param stream: obspy.core.stream object
        Will be altered and has to contain at least one Trace.
    :param resampling_rate: float
        Desired new sample rate.
    :param lowpass_value: float
        High filter frequency.
    :param highpass_value: float
        Low filter frequency.
    :param zerophase: bool
        Whether or not to use a zerophase filter.
    :param corners: int
        Number of corners for the used Butterworth-Filter.
    :param starttime: obspy.core.UTCDateTime
        New starttime of each Trace.
    :param endtime: obspy.core.UTCDateTime
        New endtime of each Trace.
    :param message_function: Python function
        If given, a string will be passed to this function to document the
        current progress.
    """
    # Convert to floats for more exact handling. Also level the data.
    for trace in stream:
        trace.data = np.require(trace.data, 'float32')
        trace.data -= np.linspace(trace.data[0], trace.data[-1], len(trace.data))
    # The first step is to resample the data. This is done before trimming
    # so that any boundary effects that might occur can be cut away later
    # on.
    if resampling_rate != stream[0].stats.sampling_rate:
        time_range = stream[0].stats.endtime - \
                     stream[0].stats.starttime
        new_npts = time_range / \
                   (1 / resampling_rate) + 1
        new_freq = 1.0 / (time_range / float(new_npts - 1))
        for _i, trace in enumerate(stream):
            if message_function:
                msg = 'Resampling traces to %.2f Hz [%i/%i]...' % \
                        (resampling_rate, _i + 1, len(stream))
                message_function(msg)
            # Use scipy to resample the traces.
            trace.data = resample(trace.data, new_npts, window='hamming')
            trace.stats.sampling_rate = new_freq
    # Filter the trace. Differentiate between low-, high-, and bandpass
    if lowpass_value and highpass_value:
        if message_function:
            msg = 'Bandpass filtering traces from %.2f Hz to %.2f Hz...' % \
                    (highpass_value, lowpass_value)
            message_function(msg)
        for trace in stream:
            trace.data = bandpass(trace.data, highpass_value,
                                  lowpass_value, trace.stats.sampling_rate,
                                  corners=corners, zerophase=zerophase)
    elif lowpass_value:
        if message_function:
            msg = 'Lowpass filtering traces with %.2f Hz...' % lowpass_value
            message_function(msg)
        for trace in stream:
            trace.data = lowpass(trace.data, lowpass_value,
                                 trace.stats.sampling_rate,
                                 corners=corners, zerophase=zerophase)
    elif highpass_value:
        if message_function:
            msg = 'Highpass filtering traces with %.2f Hz...' % highpass_value
            message_function(msg)
        for trace in stream:
            trace.data = highpass(trace.data, highpass_value,
                                  trace.stats.sampling_rate,
                                  corners=corners, zerophase=zerophase)
    # Trim the trace if it is necessary.
    if message_function:
        message_function('Trimming traces...')
    stream.trim(starttime, endtime)
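A minimal usage sketch, assuming an input file and using print as the progress callback:

from obspy import read

st = read("example.mseed")          # assumed input file
resampleFilterAndCutTraces(
    st, resampling_rate=50.0, lowpass_value=10.0, highpass_value=0.5,
    zerophase=True, corners=4,
    starttime=st[0].stats.starttime + 10,
    endtime=st[0].stats.endtime - 10,
    message_function=print)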
Example #25
pl.close("all")
#Read stations
stations=np.genfromtxt(stafile,dtype="S6",usecols=0)
x=np.genfromtxt(stafile,dtype="f8",usecols=1)
y=np.genfromtxt(stafile,dtype="f8",usecols=2)
efkcoseis=np.zeros(stations.shape)
nfkcoseis=np.zeros(stations.shape)
ufkcoseis=np.zeros(stations.shape)
#Loop, read data and plot
for j in range(len(stations)):
    k=j
    sta=stations[k]
    print(j)
    #Read validation 1
    e_v1=lowpass(np.genfromtxt(val1+sta.lower()+'.syn',dtype="f8",usecols=0,skip_header=4),freq,df=1/dt1,zerophase=True)
    n_v1=lowpass(np.genfromtxt(val1+sta.lower()+'.syn',dtype="f8",usecols=1,skip_header=4),freq,df=1/dt1,zerophase=True)
    u_v1=lowpass(np.genfromtxt(val1+sta.lower()+'.syn',dtype="f8",usecols=2,skip_header=4),freq,df=1/dt1,zerophase=True)
    t_v1=np.arange(0,len(e_v1)*dt1,dt1)#-0.8
    #Read validation 2
    e_v2=lowpass(np.genfromtxt(val2+sta.lower()+'.syn',dtype="f8",usecols=0,skip_header=4),freq,df=1/dt2,zerophase=True)
    n_v2=lowpass(np.genfromtxt(val2+sta.lower()+'.syn',dtype="f8",usecols=1,skip_header=4),freq,df=1/dt2,zerophase=True)
    u_v2=lowpass(np.genfromtxt(val2+sta.lower()+'.syn',dtype="f8",usecols=2,skip_header=4),freq,df=1/dt2,zerophase=True)
    t_v2=np.arange(0,len(e_v2)*dt2,dt2)
    #Read validation 3
    e_v3=lowpass(np.genfromtxt(val3+sta.lower()+'.syn',dtype="f8",usecols=0,skip_header=4),freq,df=1/dt3,zerophase=True)
    n_v3=lowpass(np.genfromtxt(val3+sta.lower()+'.syn',dtype="f8",usecols=1,skip_header=4),freq,df=1/dt3,zerophase=True)
    u_v3=lowpass(np.genfromtxt(val3+sta.lower()+'.syn',dtype="f8",usecols=2,skip_header=4),freq,df=1/dt3,zerophase=True)
    t_v3=np.arange(0,len(e_v3)*dt3,dt3)
    #Read my computations
    if integrate==1: #Go to displacement-land
Example #26
def crossc(dstart, dend, ch1, ch2, day):
# here you load all the functions you need to use

  from obspy.seg2.seg2 import readSEG2
  from obspy.core import Stream
  import numpy as np
  from obspy.signal.cross_correlation import xcorr
  from numpy import sign
  from obspy.signal.filter import lowpass
  from obspy.signal.filter import highpass
  from obspy.signal.filter import bandstop
  from obspy.signal.filter import bandpass
  dataDir = "/import/three-data/hadzii/STEINACH/STEINACH_longtime/"
  outdir = "/home/jsalvermoser/Desktop/Processing/bands_SNR/" + "CH" + str(ch1) + "_CH" + str(ch2) + "/" + "JAN" + str(day) + "/"


  # loading the info for the outfile name
  stream_start = readSEG2(dataDir + str(dstart) + ".dat")
  t_start = stream_start[ch1].stats.seg2.ACQUISITION_TIME
  stream_end = readSEG2(dataDir + str(dend) + ".dat")
  t_end = stream_end[ch1].stats.seg2.ACQUISITION_TIME

  # initialization of the arrays and variables
  TR = []
  rms = []
  sq = []
  ncalm = 1
  nbeat  = 1
  corr128_calm = 0
  corr128_beat = 0
  nerror = 0
  mu1c=0
  mu2c=0
  mu3c=0
  mu1b=0
  mu2b=0
  mu3b=0
  var1c=0
  var2c=0
  var3c=0
  var1b=0
  var2b=0
  var3b=0
  SNR_calm_b1=[]
  SNR_calm_b2=[]
  SNR_calm_b3=[]
  SNR_beat_b1=[]
  SNR_beat_b2=[]
  SNR_beat_b3=[]
  
  
  # TAPER (note: time_vector is assumed to be defined in the enclosing scope)
  taper_percentage = 0.05
  taper = np.blackman(int(len(time_vector) * taper_percentage))
  taper_left, taper_right = np.array_split(taper, 2)
  taper = np.concatenate([taper_left, np.ones(len(time_vector) - len(taper)), taper_right])
  
  for j in range(0, dend-dstart):
    sq.append([])



  for k in range(dstart, dend, 4):
    start = k
    end = k + 5  # only used to merge 5-1 = 4 files to one stream
    try:
      st1 = merge_single(ch1, start, end)
      st2 = merge_single(ch2, start, end)
      st1.detrend('linear')
      st2.detrend('linear')
      # calculate squares for rms
      r = k - dstart
      sq[r] = 0
      for h in range(0, 64000):
        sq[r] += (st1[0].data[h])**2
      # filter both traces: lowpass, highpass and bandstop
      st1.filter('lowpass', freq=24, zerophase=True, corners=8)
      st1.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
      st1.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)
      st2.filter('lowpass', freq=24, zerophase=True, corners=8)
      st2.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
      st2.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)

      # 1-bit normalization
      tr1 = sign(st1[0].data)
      tr2 = sign(st2[0].data)

      # cross-correlation
      index, value, acorr = xcorr(tr1, tr2, 25000, full_xcorr=True)

      print(sq[r])

      # sanity check: correlation coefficients cannot exceed 1
      if np.max(acorr) > 1:
        acorr = np.zeros(50001)

      # sort the 128 s files into calm and beat:
      # the threshold was chosen after observing calm files
      if sq[r] < 1000000000000:
        corr128_calm += acorr
        ncalm += 1.
      else:
        corr128_beat += acorr
        nbeat += 1.
      print(ncalm, nbeat)  # just to check if calm or noisy
    except:
      # sometimes channels seem to fail, so catch errors to keep the loop running
      nerror += 1
      print("%d : ERROR" % (r))
	 	
	 	
  if ncalm < 8:
    corr128_calm = np.zeros(50001)
  else:
    # normalization
    corr128_calm = (corr128_calm / ncalm) * taper

  corr128_beat = (corr128_beat / nbeat) * taper


  # filter again and divide into 3 bands which can be investigated separately
  
  corr128_calm_band1 = highpass(corr128_calm, freq=0.1, corners=4, zerophase=True, df=500.)
  corr128_calm_band1 = lowpass(corr128_calm_band1, freq=2, corners=4, zerophase=True, df=500.)
  corr128_calm_band2 = bandpass(corr128_calm, freqmin=2, freqmax=8, df=500., corners=4, zerophase=True)
  corr128_calm_band3 = bandpass(corr128_calm, freqmin=8, freqmax=24, df=500., corners=4, zerophase=True)
  corr128_beat_band1 = highpass(corr128_beat, freq=0.1, df=500., corners=4, zerophase=True)
  corr128_beat_band1 = lowpass(corr128_beat_band1, freq=2, corners=4, zerophase=True, df=500.)
  corr128_beat_band2 = bandpass(corr128_beat, freqmin=2, freqmax=8, df=500., corners=4, zerophase=True)
  corr128_beat_band3 = bandpass(corr128_beat, freqmin=8, freqmax=24, df=500., corners=4, zerophase=True)
  
  # SNR (Signal-to-Noise Ratio):
  # for the signal-to-noise ratio one divides the maximum of the signal by the
  # standard deviation of a late window (noise). As we don't know which window
  # has the lowest signal fraction, we loop over several windows. We need
  # windows of different lengths for the different bands as they contain
  # different frequencies. For every band the minimum frequency fmin is chosen
  # (e.g. 4 Hz), the duration of one cycle is 1/fmin (e.g. 0.25 s), and as we
  # take windows of 3-4 cycles we choose a window length of 4 * 0.25 s = 1 s.
  # (A standalone sketch of this computation follows this function.)
  
  ## CALM + BEAT
  for isnrb1 in range(45000,50000,2500):  # steps of half a windowlength
    endwb1=isnrb1 + 2500  # 5s window
    SNR_calm_b1.append(np.max(np.abs(corr128_calm_band1))/np.std(corr128_calm_band1[isnrb1:endwb1]))
    SNR_beat_b1.append(np.max(np.abs(corr128_beat_band1))/np.std(corr128_beat_band1[isnrb1:endwb1]))
  SNR_calm_b1 = max(SNR_calm_b1)
  SNR_beat_b1 = max(SNR_beat_b1)
  
  for isnrb2 in range(45000,49001,500):  # steps of half a windowlength
    endwb2=isnrb2 + 1000  # 2s windows
    SNR_calm_b2.append(np.max(np.abs(corr128_calm_band2))/np.std(corr128_calm_band2[isnrb2:endwb2]))
    SNR_beat_b2.append(np.max(np.abs(corr128_beat_band2))/np.std(corr128_beat_band2[isnrb2:endwb2]))
  SNR_beat_b2 = max(SNR_beat_b2)
  SNR_calm_b2 = max(SNR_calm_b2)
  
  for isnrb3 in range(45000,49751,125):  # steps of half a windowlength
    endwb3=isnrb3 + 250  # 0.5s windows
    SNR_calm_b3.append(np.max(np.abs(corr128_calm_band3))/np.std(corr128_calm_band3[isnrb3:endwb3]))
    SNR_beat_b3.append(np.max(np.abs(corr128_beat_band3))/np.std(corr128_beat_band3[isnrb3:endwb3]))
  SNR_beat_b3 = max(SNR_beat_b3)
  SNR_calm_b3 = max(SNR_calm_b3)
  
  if ncalm < 8:
    SNR_calm_b1 = 0
    SNR_calm_b2 = 0
    SNR_calm_b3 = 0

  print(SNR_calm_b1, SNR_calm_b2, SNR_calm_b3)
  print(SNR_beat_b1, SNR_beat_b2, SNR_beat_b3)
    	
  # RMS for histogram and sifting:
  #for s in range(0,dend-dstart):
  #  rms.append((sq[s]/16000)**(0.5))

  # save into files:
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_beat_0-2Hz" + "_" + "CH" + str(ch2), corr128_beat_band1)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_beat_2-8Hz" + "_" + "CH" + str(ch2), corr128_beat_band2)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_beat_8-24Hz" + "_" + "CH" + str(ch2), corr128_beat_band3)
  
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_calm_0-2Hz" + "_" + "CH" + str(ch2), corr128_calm_band1)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_calm_2-8Hz" + "_" + "CH" + str(ch2), corr128_calm_band2)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_calm_8-24Hz" + "_" + "CH" + str(ch2), corr128_calm_band3) 

  # np.save(outdir + "JAN_"+"CH" + str(ch1) + "_" +"RMS" + "_" + "CH" + str(ch2) + str(dstart) + "-" + str(dend), rms)
  
  
  return corr128_beat_band1,corr128_beat_band2,corr128_beat_band3, corr128_calm_band1,corr128_calm_band2,corr128_calm_band3, ncalm, nbeat, SNR_beat_b1, SNR_beat_b2, SNR_beat_b3, SNR_calm_b1, SNR_calm_b2, SNR_calm_b3
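The windowed SNR measurement described in the comments above can be isolated into a small helper; this is a sketch under the same assumptions (500 Hz correlation functions, noise windows of roughly four cycles of the band's lowest frequency, stepped by half a window), with names of my own choosing.
import numpy as np

def windowed_snr(corr, fmin, df=500., n_cycles=4, n_windows=5):
    # noise window of ~n_cycles cycles of the band's lowest frequency
    wlen = int(n_cycles / fmin * df)
    peak = np.max(np.abs(corr))
    snrs = []
    start = len(corr) - n_windows * (wlen // 2) - wlen
    for i0 in range(start, len(corr) - wlen + 1, wlen // 2):  # half-window steps
        snrs.append(peak / np.std(corr[i0:i0 + wlen]))
    return max(snrs)

# e.g. for a band with fmin = 4 Hz this uses 1 s (500-sample) windows:
# snr_b3 = windowed_snr(corr128_beat_band3, fmin=4.0)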
Example #27
0
taper= np.blackman(int(len(time_vector) * taper_percentage))
taper_left, taper_right = np.array_split(taper,2)
taper = np.concatenate([taper_left,np.ones(len(time_vector)-len(taper)),taper_right])

ax = plt.subplot(111)

corr = 0  # running stack of the correlation functions
for k in range(1048728,1048840,4):
 end=k+4
 print(end)
 tr1=merge_single(6,k,end)
 tr2=merge_single(7,k,end)
 tr1.detrend('linear')
 tr2.detrend('linear')
 tr1.filter('lowpass',freq = 24, zerophase=True, corners=8)
 tr1.filter('highpass', freq= 0.05, zerophase=True, corners=2)
 tr1.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)
 tr2.filter('lowpass',freq = 24, zerophase=True, corners=8)
 tr2.filter('highpass', freq= 0.05, zerophase=True, corners=2)
 tr2.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)
 tr1=sign(tr1.data)
 tr2=sign(tr2.data)
 
 
 index,value,acorr = xcorr(tr1, tr2, 25000, full_xcorr=True)
 acorr = acorr*taper
 acorr = highpass(acorr, freq=0.1, corners=4, zerophase=True, df=500.)
 acorr = lowpass(acorr, freq=2, corners=4, zerophase=True, df=500.)
 ax.plot(time_vector,acorr/np.max(acorr) +k-1048728)
 corr+=acorr
ax.plot(time_vector,corr/np.max(corr)-4)
plt.show()
Example #28
0
def calculate_features(file_name):
    f = mat_to_data(file_name)
    fs = f['iEEGsamplingRate'][0, 0]
    eegData = f['data']
    time = np.arange(len(eegData)) / fs

    # define time windows to look at trends
    t_win = 30  # let's start with 30-second windows
    frac_overlap = .5  # at 50% overlap
    length = np.ceil(t_win * fs)
    overlap = np.ceil(length * frac_overlap)

    # loop over channels
    features_by_channel = []
    for i in range(0, 16):
        features = []
        data = eegData[:, i]
        eeg_All = data

        # measurements on all frequencies
        data_squared = np.power(data, 2)
        energy_all = np.trapz(data_squared, time)
        var_all = np.var(data)
        f_all, Pxx_all = signal.periodogram(data,
                                            fs,
                                            detrend='constant',
                                            return_onesided=True)

        features.append(energy_all)
        features.append(var_all)
        features.append(Pxx_all)

        # measure psd over time
        data_windowed = windows(eeg_All, length, overlap)
        time_windowed = windows(time, length, overlap)
        time_center = np.median(time_windowed, axis=-1)

        f_win, Pxx_win = signal.periodogram(data_windowed,
                                            fs,
                                            detrend='constant',
                                            return_onesided=True,
                                            axis=-1)

        # trends in psd over time
        psd_trend1 = []
        psd_trend2 = []
        for idx in range(0, len(f_win)):
            m_Pxx, b_Pxx = np.polyfit(time_center, Pxx_win[:, idx], 1)
            Pxx_var = np.var(Pxx_win[:, idx])
            psd_trend1.append(m_Pxx)
            psd_trend2.append(Pxx_var)
        features.append(psd_trend1)
        features.append(psd_trend2)

        # subset data by EEG frequency: does any particular type of brain wave correlate with pre-seizures?
        eeg_Delta = filter.lowpass(data, 4, fs, corners=2, zerophase=True)
        eeg_Theta = filter.bandpass(data, 4, 7, fs, corners=2, zerophase=True)
        eeg_Alpha = filter.bandpass(data, 7, 14, fs, corners=2, zerophase=True)
        eeg_Beta = filter.bandpass(data, 15, 30, fs, corners=2, zerophase=True)
        eeg_Gamma = filter.bandpass(data,
                                    30,
                                    100,
                                    fs,
                                    corners=2,
                                    zerophase=True)
        eeg_Mu = filter.bandpass(data, 8, 13, fs, corners=2, zerophase=True)
        eeg_High = filter.bandpass(data,
                                   100,
                                   200,
                                   fs,
                                   corners=2,
                                   zerophase=True)

        data_all_freqs = [
            eeg_Delta, eeg_Theta, eeg_Alpha, eeg_Beta, eeg_Gamma, eeg_Mu,
            eeg_High
        ]

        for j in range(0, len(data_all_freqs)):
            data = data_all_freqs[j]
            data_squared = np.power(data, 2)

            energy = np.trapz(data_squared, time)
            var = np.var(data)
            data_windowed = windows(data, length, overlap)

            # measure variance
            var = np.var(data_windowed, axis=-1)
            # variance trend
            m_var, b_var = np.polyfit(time_center, var, 1)

            # measure energy
            data_windowed_squared = np.power(data_windowed, 2)
            energy_win = np.trapz(data_windowed_squared, axis=-1)

            # energy trend
            m_energy, b_energy = np.polyfit(time_center, energy_win, 1)
Example #29
0
                     goal_duration - stream[0].stats.delta))

                stream[0].trim(
                    utcdatetime.UTCDateTime(goal_day.replace('-', '')),
                    utcdatetime.UTCDateTime(goal_day.replace('-', '')) +
                    goal_duration - stream[0].stats.delta,
                    pad=True,
                    fill_value=0.0)
                trace = stream[0]

                data = trace.data
                freq = preprocess_lowpass
                logging.debug("%s.%s Lowpass at %.2f Hz" %
                              (station, comp, freq))
                data = lowpass(trace.data,
                               freq,
                               trace.stats.sampling_rate,
                               zerophase=True)

                freq = preprocess_highpass
                logging.debug("%s.%s Highpass at %.2f Hz" %
                              (station, comp, freq))
                data = highpass(data,
                                freq,
                                trace.stats.sampling_rate,
                                zerophase=True)

                samplerate = trace.stats['sampling_rate']
                if samplerate != goal_sampling_rate:
                    if resampling_method == "Resample":
                        logging.debug("%s.%s Downsample to %.1f Hz" %
                                      (station, comp, goal_sampling_rate))
Example #30
0
                        data *= tp
                        trace.data = data
                    else:
                        trace.data *= 0
                    del data
                logging.debug("%s.%s Merging Stream" % (station, comp))
                stream.merge(fill_value=0) #fills gaps with 0s and gives only one 'Trace'
                logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp,utcdatetime.UTCDateTime(goal_day.replace('-','')),utcdatetime.UTCDateTime(goal_day.replace('-',''))+goal_duration-stream[0].stats.delta))
                
                stream[0].trim(utcdatetime.UTCDateTime(goal_day.replace('-','')),utcdatetime.UTCDateTime(goal_day.replace('-',''))+goal_duration-stream[0].stats.delta, pad=True,fill_value=0.0)
                trace = stream[0]

                data = trace.data
                freq = preprocess_lowpass
                logging.debug("%s.%s Lowpass at %.2f Hz" % (station, comp,freq))
                data = lowpass(trace.data, freq, trace.stats.sampling_rate,zerophase=True)
                
                freq = preprocess_highpass
                logging.debug("%s.%s Highpass at %.2f Hz" % (station, comp,freq))
                data = highpass(data, freq, trace.stats.sampling_rate,zerophase=True)

                samplerate = trace.stats['sampling_rate']
                if samplerate != goal_sampling_rate:
                    if resampling_method == "Resample":
                        logging.debug("%s.%s Downsample to %.1f Hz" % (station, comp,goal_sampling_rate))
                        data = resample(data, goal_sampling_rate/trace.stats.sampling_rate, 'sinc_best')
                    elif resampling_method == "Decimate":
                        logging.debug("%s.%s Decimate by a factor of %i" % (station, comp,decimation_factor))
                        data = data[::decimation_factor]
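Note that the "Decimate" branch above drops samples without an anti-alias filter; below is a hedged sketch of guarding plain slicing with a lowpass at the decimated Nyquist (the rate, factor and margin are illustrative, not values from this program).
import numpy as np
from obspy.signal.filter import lowpass

fs = 100.0
decimation_factor = 5
data = np.random.randn(10000)

# lowpass just below the new Nyquist before taking every Nth sample
new_nyquist = fs / decimation_factor / 2.0
data = lowpass(data, 0.9 * new_nyquist, fs, corners=4, zerophase=True)
data = data[::decimation_factor]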
                    
                
Example #31
0
def xcorr2(tr1,
           tr2,
           sta1_inv=None,
           sta2_inv=None,
           instrument_response_output='vel',
           water_level=50.,
           window_seconds=3600,
           window_overlap=0.1,
           window_buffer_length=0,
           interval_seconds=86400,
           taper_length=0.05,
           resample_rate=None,
           flo=None,
           fhi=None,
           clip_to_2std=False,
           whitening=False,
           whitening_window_frequency=0,
           one_bit_normalize=False,
           envelope_normalize=False,
           verbose=1,
           logger=None):

    # Length of window_buffer in seconds
    window_buffer_seconds = window_buffer_length * window_seconds
    adjusted_taper_length = taper_length
    if (window_buffer_seconds):
        # adjust taper length
        adjusted_taper_length = taper_length / (1. + window_buffer_length * 2.)
    # end if

    sr1 = tr1.stats.sampling_rate
    sr2 = tr2.stats.sampling_rate
    sr1_orig = sr1
    sr2_orig = sr2
    tr1_d_all = tr1.data  # refstn
    tr2_d_all = tr2.data
    lentr1_all = tr1_d_all.shape[0]
    lentr2_all = tr2_d_all.shape[0]
    window_samples_1 = (window_seconds + 2 * window_buffer_seconds) * sr1
    window_samples_2 = (window_seconds + 2 * window_buffer_seconds) * sr2
    interval_samples_1 = interval_seconds * sr1
    interval_samples_2 = interval_seconds * sr2
    sr = 0
    resll = []

    # set day-aligned start-indices
    maxStartTime = max(tr1.stats.starttime, tr2.stats.starttime)
    dayAlignedStartTime = UTCDateTime(year=maxStartTime.year,
                                      month=maxStartTime.month,
                                      day=maxStartTime.day)
    itr1s = (dayAlignedStartTime - tr1.stats.starttime) * sr1
    itr2s = (dayAlignedStartTime - tr2.stats.starttime) * sr2

    if (resample_rate):
        sr1 = resample_rate
        sr2 = resample_rate
    # end if
    sr = max(sr1, sr2)
    xcorlen = int(2 * window_seconds * sr - 1)
    fftlen = 2**(int(np.log2(xcorlen)) + 1)

    intervalCount = 0
    windowsPerInterval = []  # stores the number of windows processed per interval
    intervalStartSeconds = []
    intervalEndSeconds = []
    while itr1s < lentr1_all and itr2s < lentr2_all:
        itr1e = min(lentr1_all, itr1s + interval_samples_1)
        itr2e = min(lentr2_all, itr2s + interval_samples_2)

        while ((itr1s < 0) or (itr2s < 0)):
            itr1s += (window_samples_1 - 2*window_buffer_seconds*sr1_orig) - \
                     (window_samples_1 - 2*window_buffer_seconds*sr1_orig) * window_overlap
            itr2s += (window_samples_2 - 2*window_buffer_seconds*sr2_orig) - \
                     (window_samples_2 - 2*window_buffer_seconds*sr2_orig) * window_overlap
        # end while

        if (np.fabs(itr1e - itr1s) < sr1_orig
                or np.fabs(itr2e - itr2s) < sr2_orig):
            itr1s = itr1e
            itr2s = itr2e
            continue
        # end if

        if (tr1.stats.starttime + itr1s / sr1_orig !=
                tr2.stats.starttime + itr2s / sr2_orig):
            if (logger): logger.warning('Detected misaligned traces..')

        windowCount = 0
        wtr1s = int(itr1s)
        wtr2s = int(itr2s)
        resl = []

        while wtr1s < itr1e and wtr2s < itr2e:
            wtr1e = int(min(itr1e, wtr1s + window_samples_1))
            wtr2e = int(min(itr2e, wtr2s + window_samples_2))

            # Discard small windows
            if ((wtr1e - wtr1s < window_samples_1)
                    or (wtr2e - wtr2s < window_samples_2)
                    or (wtr1e - wtr1s < sr1_orig)
                    or (wtr2e - wtr2s < sr2_orig)):
                wtr1s = int(np.ceil(itr1e))
                wtr2s = int(np.ceil(itr2e))
                continue
            # end if

            # Discard windows with masked regions, i.e. with gaps or windows that are all zeros
            if (not (np.ma.is_masked(tr1_d_all[wtr1s:wtr1e])
                     or np.ma.is_masked(tr2_d_all[wtr2s:wtr2e])
                     or np.sum(tr1_d_all[wtr1s:wtr1e]) == 0
                     or np.sum(tr2_d_all[wtr2s:wtr2e]) == 0)):

                #logger.info('%s, %s' % (tr1.stats.starttime + wtr1s / 200., tr1.stats.starttime + wtr1e / sr1_orig))
                #logger.info('%s, %s' % (tr2.stats.starttime + wtr2s / 200., tr2.stats.starttime + wtr2e / sr2_orig))

                tr1_d = np.array(tr1_d_all[wtr1s:wtr1e], dtype=np.float32)
                tr2_d = np.array(tr2_d_all[wtr2s:wtr2e], dtype=np.float32)

                # STEP 1: detrend
                tr1_d = signal.detrend(tr1_d)
                tr2_d = signal.detrend(tr2_d)

                # STEP 2: demean
                tr1_d -= np.mean(tr1_d)
                tr2_d -= np.mean(tr2_d)

                # STEP 3: remove response
                if (sta1_inv):
                    resp_tr1 = Trace(
                        data=tr1_d,
                        header=Stats(
                            header={
                                'sampling_rate':
                                sr1_orig,
                                'npts':
                                len(tr1_d),
                                'network':
                                tr1.stats.network,
                                'station':
                                tr1.stats.station,
                                'location':
                                tr1.stats.location,
                                'channel':
                                tr1.stats.channel,
                                'starttime':
                                tr1.stats.starttime + float(wtr1s) / sr1_orig,
                                'endtime':
                                tr1.stats.starttime + float(wtr1e) / sr1_orig
                            }))
                    try:
                        resp_tr1.remove_response(
                            inventory=sta1_inv,
                            output=instrument_response_output.upper(),
                            water_level=water_level)
                    except Exception as e:
                        print(e)
                    # end try

                    tr1_d = resp_tr1.data
                # end if

                # remove response
                if (sta2_inv):
                    resp_tr2 = Trace(
                        data=tr2_d,
                        header=Stats(
                            header={
                                'sampling_rate':
                                sr2_orig,
                                'npts':
                                len(tr2_d),
                                'network':
                                tr2.stats.network,
                                'station':
                                tr2.stats.station,
                                'location':
                                tr2.stats.location,
                                'channel':
                                tr2.stats.channel,
                                'starttime':
                                tr2.stats.starttime + float(wtr2s) / sr2_orig,
                                'endtime':
                                tr2.stats.starttime + float(wtr2e) / sr2_orig
                            }))
                    try:
                        resp_tr2.remove_response(
                            inventory=sta2_inv,
                            output=instrument_response_output.upper(),
                            water_level=water_level)
                    except Exception as e:
                        print(e)
                    # end try

                    tr2_d = resp_tr2.data
                # end if

                # STEPS 4, 5: resample after lowpass @ resample_rate/2 Hz
                if (resample_rate):
                    tr1_d = lowpass(tr1_d,
                                    resample_rate / 2.,
                                    sr1_orig,
                                    corners=2,
                                    zerophase=True)
                    tr2_d = lowpass(tr2_d,
                                    resample_rate / 2.,
                                    sr2_orig,
                                    corners=2,
                                    zerophase=True)

                    tr1_d = Trace(
                        data=tr1_d,
                        header=Stats(header={
                            'sampling_rate': sr1_orig,
                            'npts': window_samples_1
                        })).resample(resample_rate, no_filter=True).data
                    tr2_d = Trace(
                        data=tr2_d,
                        header=Stats(header={
                            'sampling_rate': sr2_orig,
                            'npts': window_samples_2
                        })).resample(resample_rate, no_filter=True).data
                # end if

                # STEP 6: Bandpass
                if (flo and fhi):
                    tr1_d = bandpass(tr1_d,
                                     flo,
                                     fhi,
                                     sr1,
                                     corners=2,
                                     zerophase=True)
                    tr2_d = bandpass(tr2_d,
                                     flo,
                                     fhi,
                                     sr2,
                                     corners=2,
                                     zerophase=True)
                # end if

                # STEP 7: time-domain normalization
                # clip to +/- 2*std
                if (clip_to_2std):
                    std_tr1 = np.std(tr1_d)
                    std_tr2 = np.std(tr2_d)
                    clip_indices_tr1 = np.fabs(tr1_d) > 2 * std_tr1
                    clip_indices_tr2 = np.fabs(tr2_d) > 2 * std_tr2

                    tr1_d[clip_indices_tr1] = 2 * std_tr1 * np.sign(
                        tr1_d[clip_indices_tr1])
                    tr2_d[clip_indices_tr2] = 2 * std_tr2 * np.sign(
                        tr2_d[clip_indices_tr2])
                # end if

                # 1-bit normalization
                if (one_bit_normalize):
                    tr1_d = np.sign(tr1_d)
                    tr2_d = np.sign(tr2_d)
                # end if

                # Apply Rhys Hawkins-style default time domain normalization
                if (clip_to_2std == 0 and one_bit_normalize == 0):
                    # 0-mean
                    tr1_d -= np.mean(tr1_d)
                    tr2_d -= np.mean(tr2_d)

                    # unit-std
                    tr1_d /= np.std(tr1_d)
                    tr2_d /= np.std(tr2_d)
                # end if

                # STEP 8: taper
                if (adjusted_taper_length > 0):
                    tr1_d = taper(
                        tr1_d,
                        int(np.round(adjusted_taper_length * tr1_d.shape[0])))
                    tr2_d = taper(
                        tr2_d,
                        int(np.round(adjusted_taper_length * tr2_d.shape[0])))
                # end if

                # STEP 9: spectral whitening
                if (whitening):
                    tr1_d = whiten(tr1_d,
                                   sr1,
                                   window_freq=whitening_window_frequency)
                    tr2_d = whiten(tr2_d,
                                   sr2,
                                   window_freq=whitening_window_frequency)

                    # STEP 10: taper
                    if (adjusted_taper_length > 0):
                        tr1_d = taper(
                            tr1_d,
                            int(
                                np.round(adjusted_taper_length *
                                         tr1_d.shape[0])))
                        tr2_d = taper(
                            tr2_d,
                            int(
                                np.round(adjusted_taper_length *
                                         tr2_d.shape[0])))
                    # end if
                # end if

                # STEP 11: Final bandpass
                # apply zero-phase bandpass
                if (flo and fhi):
                    tr1_d = bandpass(tr1_d,
                                     flo,
                                     fhi,
                                     sr1,
                                     corners=2,
                                     zerophase=True)
                    tr2_d = bandpass(tr2_d,
                                     flo,
                                     fhi,
                                     sr2,
                                     corners=2,
                                     zerophase=True)
                # end if

                if (window_buffer_seconds):
                    # extract window of interest from buffered window
                    tr1_d = tr1_d[int(window_buffer_seconds *
                                      sr1):-int(window_buffer_seconds * sr1)]
                    tr2_d = tr2_d[int(window_buffer_seconds *
                                      sr2):-int(window_buffer_seconds * sr2)]
                # end if

                # cross-correlate waveforms
                if (sr1 < sr2):
                    fftlen2 = fftlen
                    fftlen1 = int((fftlen2 * 1.0 * sr1) / sr)
                    rf = zeropad_ba(
                        fftn(zeropad(tr1_d, fftlen1), shape=[fftlen1]),
                        fftlen2) * fftn(zeropad(ndflip(tr2_d), fftlen2),
                                        shape=[fftlen2])
                elif (sr1 > sr2):
                    fftlen1 = fftlen
                    fftlen2 = int((fftlen1 * 1.0 * sr2) / sr)
                    rf = fftn(zeropad(tr1_d, fftlen1),
                              shape=[fftlen1]) * zeropad_ba(
                                  fftn(zeropad(ndflip(tr2_d), fftlen2),
                                       shape=[fftlen2]), fftlen1)
                else:
                    rf = fftn(zeropad(tr1_d, fftlen), shape=[fftlen]) * fftn(
                        zeropad(ndflip(tr2_d), fftlen), shape=[fftlen])
                # end if

                if (not np.isnan(rf).any()):
                    resl.append(rf)
                    windowCount += 1
                # end if
            # end if

            wtr1s += int(
                (window_samples_1 - 2 * window_buffer_seconds * sr1_orig) -
                (window_samples_1 - 2 * window_buffer_seconds * sr1_orig) *
                window_overlap)
            wtr2s += int(
                (window_samples_2 - 2 * window_buffer_seconds * sr2_orig) -
                (window_samples_2 - 2 * window_buffer_seconds * sr2_orig) *
                window_overlap)
        # end while (windows within interval)

        if (verbose > 1):
            if (logger):
                logger.info('\tProcessed %d windows in interval %d' %
                            (windowCount, intervalCount))
        # end if

        intervalStartSeconds.append(itr1s / sr1_orig +
                                    tr1.stats.starttime.timestamp)
        intervalEndSeconds.append(itr1e / sr1_orig +
                                  tr1.stats.starttime.timestamp)
        itr1s = itr1e
        itr2s = itr2e
        intervalCount += 1

        # Append an array of zeros if no windows were processed for the current interval
        if (windowCount == 0):
            resl.append(np.zeros(fftlen))
            if (verbose > 1):
                if (logger):
                    logger.info(
                        '\tWarning: No windows processed due to gaps in data in current interval'
                    )
            # end if
        # end if

        windowsPerInterval.append(windowCount)

        if (windowCount > 0):
            mean = reduce((lambda tx, ty: tx + ty), resl) / float(windowCount)
        else:
            mean = reduce((lambda tx, ty: tx + ty), resl)
        # end if

        if (envelope_normalize):
            step = np.sign(np.fft.fftfreq(fftlen, 1.0 / sr))
            mean = mean + step * mean  # compute analytic
        # end if

        mean = ifftn(mean)

        if (envelope_normalize):
            # Compute magnitude of mean
            mean = np.abs(mean)
            normFactor = np.max(mean)

            # mean can be 0 for a null result
            if (normFactor > 0):
                mean /= normFactor
            # end if
        # end if

        resll.append(mean[:xcorlen])
    # end while (iteration over intervals)

    if (len(resll)):
        return np.array(resll), np.array(windowsPerInterval), \
               np.array(intervalStartSeconds, dtype='i8'), \
               np.array(intervalEndSeconds, dtype='i8'), \
               sr
    else:
        return None, None, None, None, None
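xcorr2 performs the correlation in the frequency domain through helper functions (zeropad, zeropad_ba, ndflip, fftn, ifftn) defined elsewhere in its module; the numpy-only sketch below shows the same core idea for two windows at equal sampling rates, with hypothetical names of my own.
import numpy as np

def fft_crosscorrelate(a, b):
    """Cross-correlate two equal-rate windows via the frequency domain."""
    n = len(a) + len(b) - 1                  # full correlation length
    nfft = 2 ** int(np.ceil(np.log2(n)))     # zero-pad to a power of two
    # correlation(a, b) = IFFT(FFT(a) * FFT(time-reversed b))
    rf = np.fft.fft(a, nfft) * np.fft.fft(b[::-1], nfft)
    return np.real(np.fft.ifft(rf))[:n]

a = np.random.randn(1000)
b = np.roll(a, 50)                           # b lags a by 50 samples
cc = fft_crosscorrelate(a, b)
print(np.argmax(cc))                         # peak near len(b) - 1 - 50 = 949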
Example #32
0
def stalta_eventwindow_function(data, f1=0, f2=0, wsta=10, wlta=60, thrON=3.0, thrOFF=2.0):
  """
  Compute the STA/LTA of a data channel and trigger event windows.

  data = [times values], with times in seconds
  f1, f2: corner frequencies of the bandpass filter;
    if f2 = 0 a highpass is assumed, if f1 = 0 a lowpass is assumed
  wsta: STA window size in seconds
  wlta: LTA window size in seconds
  thrON, thrOFF: trigger on/off thresholds for the STA/LTA
  On output stalta = [times stalta] and
  events = [event_start_time event_end_time SNR=max_stalta/1.0].
  """

  import numpy

  import obspy.signal.filter as obspy_filter
  from obspy.signal import trigger

# start function computations
  stalta = numpy.zeros(data.shape)
  wavesor = data[:,1]    
  times = data[:,0]
  dt = times[2] - times[1]
  dt = float(dt)
  fs = 1 / dt
  nsta= int( wsta / dt )
  nlta= int( wlta / dt )

  waves = numpy.zeros(wavesor.shape)
  waves[:]=wavesor[:]
# filtering
  if (f1 > 0)  & (f2 > 0):
     waves = obspy_filter.bandpass(wavesor, f1, f2, fs, corners=4, zerophase=True)
  elif (f1 > 0) & (f2 <= 0):
     waves = obspy_filter.highpass(wavesor, f1, fs, corners=4, zerophase=True)
  elif (f1 <= 0) & (f2 > 0):
     waves = obspy_filter.lowpass(wavesor, f2, fs, corners=4, zerophase=True)

#  print("Calculating STA/LTA...")
  stalta_comp = trigger.classic_sta_lta(waves, nsta, nlta)
  eventlist = trigger.trigger_onset(stalta_comp, thrON, thrOFF, max_len=9e+99, max_len_delete=False)
  nev=len(eventlist)
  if nev > 0:
     eventtimes=(eventlist)*dt + times[0]
     events=numpy.zeros((nev,3))
     events[:,0:2:1]=eventtimes[:,0:2:1]

     for n in range(0,nev) :
        n1=eventlist[n,0]
        n2=eventlist[n,1]
        events[n,2] = numpy.amax(stalta_comp[n1:n2+1:1]) / 1.0

  else:
     events=numpy.zeros((nev,3))


  stalta[:,0]=times
  stalta[:,1]=stalta_comp

  return stalta, events
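A possible usage sketch for the function above on a synthetic two-column [times, values] array; the burst, band and thresholds are invented for illustration.
import numpy as np

fs = 100.0
t = np.arange(0, 600.0, 1.0 / fs)
v = np.random.randn(t.size) * 0.1
# inject a 2 s, 5 Hz burst at t = 300 s
v[int(300 * fs):int(302 * fs)] += 5.0 * np.sin(2 * np.pi * 5.0 * t[:int(2 * fs)])
data = np.column_stack((t, v))

# 1-10 Hz bandpass, 10 s STA, 60 s LTA, trigger on at 3.0 and off at 2.0
stalta, events = stalta_eventwindow_function(data, f1=1.0, f2=10.0, wsta=10,
                                             wlta=60, thrON=3.0, thrOFF=2.0)
for ev in events:
    print("event from %.1f s to %.1f s, SNR %.1f" % (ev[0], ev[1], ev[2]))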
Example #33
0
def process(tr, lowcut, highcut, filt_order, samp_rate, debug,
            starttime=False, clip=False, length=86400,
            seisan_chan_names=True, ignore_length=False):
    """
    Basic function to process data, usually called by dayproc or shortproc.

    Functionally, this will bandpass, downsample and check headers and length
    of trace to ensure files start at the start of a day and are daylong.
    This is a simple wrapper on obspy functions, we include it here to provide
    a system to ensure all parts of the dataset are processed in the same way.

    .. note:: Usually this function is called via dayproc or shortproc.

    :type tr: obspy.core.trace.Trace
    :param tr: Trace to process
    :type lowcut: float
    :param lowcut: Low cut in Hz, if set to None and highcut is set, will use \
        a lowpass filter.
    :type highcut: float
    :param highcut: High cut in Hz, if set to None and lowcut is set, will \
        use a highpass filter.
    :type filt_order: int
    :param filt_order: Number of corners for filter.
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz.
    :type debug: int
    :param debug: Debug output level from 0-5, higher numbers = more output.
    :type starttime: obspy.core.utcdatetime.UTCDateTime
    :param starttime: Desired start of trace
    :type clip: bool
    :param clip: Whether to expect, and enforce a set length of data or not.
    :type length: float
    :param length: Use to set a fixed length for data from the given starttime.
    :type seisan_chan_names: bool
    :param seisan_chan_names:
        Whether channels are named like seisan channels (which are two letters
        rather than SEED convention of three) - defaults to True.
    :type ignore_length: bool
    :param ignore_length: See warning in dayproc.

    :return: Processed trace.
    :type: :class:`obspy.core.trace.Trace`
    """
    # Add sanity check
    if highcut and highcut >= 0.5 * samp_rate:
        raise IOError('Highcut must be lower than the nyquist')

    # Define the start-time
    if starttime:
        # Be nice and allow a datetime object.
        if isinstance(starttime, dt.date) or isinstance(starttime,
                                                        dt.datetime):
            starttime = UTCDateTime(starttime)
        day = starttime.date
    else:
        day = tr.stats.starttime.date

    if debug >= 2:
        print('Working on: ' + tr.stats.station + '.' + tr.stats.channel)
    if debug >= 5:
        tr.plot()
    # Do a brute force quality check
    qual = _check_daylong(tr)
    if not qual:
        msg = ("Data have more zeros than actual data, please check the raw",
               " data set-up and manually sort it: " + tr.stats.station + "." +
               tr.stats.channel)
        raise ValueError(msg)
    tr = tr.detrend('simple')
    # Detrend data before filtering
    if debug > 0:
        print('I have ' + str(len(tr.data)) + ' data points for ' +
              tr.stats.station + '.' + tr.stats.channel +
              ' before processing')

    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != length and clip:
        if debug >= 2:
            print('Data for ' + tr.stats.station + '.' + tr.stats.channel +
                  ' are not of daylong length, will zero pad')
        if tr.stats.endtime - tr.stats.starttime < 0.8 * length\
           and not ignore_length:
            msg = ('Data for %s.%s is %i hours long, which is less than 0.8 '
                   'of the desired length, will not pad' %
                   (tr.stats.station, tr.stats.channel,
                    (tr.stats.endtime - tr.stats.starttime) / 3600))
            raise NotImplementedError(msg)
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime, starttime + length, pad=True, fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this remove the first one
        # by convention
        if len(tr.data) == (length * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * length == tr.stats.npts:
            raise ValueError('Data are not daylong for ' +
                             tr.stats.station + '.' + tr.stats.channel)

        print('I now have %i data points after enforcing length'
              % len(tr.data))
    # Check sampling rate and resample
    if tr.stats.sampling_rate != samp_rate:
        if debug >= 2:
            print('Resampling')
        tr.resample(samp_rate)
    # Filtering section
    tr = tr.detrend('simple')    # Detrend data again before filtering
    if highcut and lowcut:
        if debug >= 2:
            print('Bandpassing')
        tr.data = bandpass(tr.data, lowcut, highcut,
                           tr.stats.sampling_rate, filt_order, True)
    elif highcut:
        if debug >= 2:
            print('Lowpassing')
        tr.data = lowpass(tr.data, highcut, tr.stats.sampling_rate,
                          filt_order, True)
    elif lowcut:
        if debug >= 2:
            print('Highpassing')
        tr.data = highpass(tr.data, lowcut, tr.stats.sampling_rate,
                           filt_order, True)
    else:
        warnings.warn('No filters applied')
    # Account for two letter channel names in s-files and therefore templates
    if seisan_chan_names:
        tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]

    # Sanity check the time header
    if tr.stats.starttime.day != day and clip:
        warnings.warn("Time headers do not match expected date: " +
                      str(tr.stats.starttime))

    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != length and clip:
        if debug >= 2:
            print('Data for ' + tr.stats.station + '.' + tr.stats.channel +
                  ' are not of daylong length, will zero pad')
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime, starttime + length, pad=True, fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this remove the last one
        # by convention
        if len(tr.data) == (length * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * length == tr.stats.npts:
            raise ValueError('Data are not daylong for ' +
                             tr.stats.station + '.' + tr.stats.channel)
    # Final visual check for debug
    if debug > 4:
        tr.plot()
    return tr
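A hedged usage sketch for this process function, assuming its module-level imports (bandpass, lowpass, highpass, UTCDateTime, _check_daylong, ...) are in place; the trace and filter settings are synthetic.
import numpy as np
from obspy import Trace, UTCDateTime

tr = Trace(data=np.random.randn(int(86400 * 50)))
tr.stats.sampling_rate = 50.0
tr.stats.starttime = UTCDateTime(2017, 1, 1)
tr.stats.station, tr.stats.channel = 'FOZ', 'HHZ'

# bandpass 2-8 Hz with 4 corners, resample to 20 Hz, enforce a daylong trace
tr = process(tr, lowcut=2.0, highcut=8.0, filt_order=4, samp_rate=20.0,
             debug=0, starttime=tr.stats.starttime, clip=True, length=86400)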
Example #34
0
def process(tr, lowcut, highcut, filt_order, samp_rate, debug,
            starttime=False, full_day=False):
    r"""Basic function to bandpass, downsample and check headers and length \
    of trace to ensure files start at the start of a day and are daylong.

    Works in place on data.  This is employed to ensure all parts of the data \
    are processed in the same way.

    .. note:: Usually this function is called via dayproc or shortproc.

    :type tr: obspy.Trace
    :param tr: Trace to process
    :type highcut: float
    :param highcut: High cut in Hz, if set to None and lowcut is set, will \
        use a highpass filter.
    :type lowcut: float
    :param lowcut: Low cut in Hz, if set to None and highcut is set, will \
        use a lowpass filter.
    :type filt_order: int
    :param filt_order: Number of corners for filter.
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz
    :type debug: int
    :param debug: Debug output level from 0-5, higher numbers = more output
    :type starttime: obspy.UTCDateTime
    :param starttime: Desired start of trace
    :type full_day: bool
    :param full_day: Whether to expect, and enforce a full day of data or not.

    :return: obspy.Stream

    .. note:: Will convert channel names to two characters long.
    """
    import warnings
    from obspy.signal.filter import bandpass, lowpass, highpass
    # Add sanity check
    if highcut and highcut >= 0.5*samp_rate:
        raise IOError('Highcut must be lower than the nyquist')
    # Define the start-time
    if starttime:
        day = starttime.date
    else:
        day = tr.stats.starttime.date

    if debug >= 2:
        print('Working on: '+tr.stats.station+'.'+tr.stats.channel)
    if debug >= 5:
        tr.plot()
    # Do a brute force quality check
    qual = _check_daylong(tr)
    if not qual:
        msg = ("Data have more zeros than actual data, please check the raw",
               " data set-up and manually sort it")
        raise ValueError(msg)
    tr = tr.detrend('simple')    # Detrend data before filtering

    # If there is one sample too many remove the first sample - this occurs
    # at station FOZ where the first sample is zero when it shouldn't be,
    # Not real sample: generated during data download
    # if full_day:
    #     if len(tr.data) == (86400 * tr.stats.sampling_rate) + 1:
    #         tr.data = tr.data[1:len(tr.data)]
    if debug > 0:
        print('I have '+str(len(tr.data))+' data points for ' +
              tr.stats.station+'.'+tr.stats.channel+' before processing')

    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != 86400.0\
       and full_day:
        if debug >= 2:
            print('Data for '+tr.stats.station+'.'+tr.stats.channel +
                  ' are not of daylong length, will zero pad')
        # Work out when the trace thinks it is starting
        # traceday = UTCDateTime(str(tr.stats.starttime.year)+'-' +
        #                        str(tr.stats.starttime.month)+'-' +
        #                        str(tr.stats.starttime.day))
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime, starttime+86400, pad=True, fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this remove the last one
        # by convention
        if len(tr.data) == (86400 * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * 86400 == tr.stats.npts:
            raise ValueError('Data are not daylong for '+tr.stats.station +
                             '.'+tr.stats.channel)

        print('I now have '+str(len(tr.data)) +
              ' data points after enforcing day length')

    # Check sampling rate and resample
    if tr.stats.sampling_rate != samp_rate:
        if debug >= 2:
            print('Resampling')
        tr.resample(samp_rate)

    # Filtering section
    tr = tr.detrend('simple')    # Detrend data again before filtering
    if highcut and lowcut:
        if debug >= 2:
            print('Bandpassing')
        tr.data = bandpass(tr.data, lowcut, highcut,
                           tr.stats.sampling_rate, filt_order, True)
    elif highcut:
        if debug >= 2:
            print('Lowpassing')
        tr.data = lowpass(tr.data, highcut, tr.stats.sampling_rate,
                          filt_order, True)
    elif lowcut:
        if debug >= 2:
            print('Highpassing')
        tr.data = highpass(tr.data, lowcut, tr.stats.sampling_rate,
                           filt_order, True)
    else:
        warnings.warn('No filters applied')

    # Account for two letter channel names in s-files and therefore templates
    tr.stats.channel = tr.stats.channel[0]+tr.stats.channel[-1]

    # Sanity check the time header
    if tr.stats.starttime.day != day and full_day:
        warnings.warn("Time headers do not match expected date: " +
                      str(tr.stats.starttime))

    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != 86400.0 and full_day:
        if debug >= 2:
            print('Data for '+tr.stats.station+'.'+tr.stats.channel +
                  ' are not of daylong length, will zero pad')
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime, starttime+86400, pad=True, fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this remove the last one
        # by convention
        if len(tr.data) == (86400 * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate*86400 == tr.stats.npts:
            raise ValueError('Data are not daylong for '+tr.stats.station +
                             '.'+tr.stats.channel)
    # Final visual check for debug
    if debug >= 4:
        tr.plot()
    return tr
Example #35
0
nts = 100

lonstart = 0.

area = rupture_len * dep * 2 / npoints * 1e10  # in cm**2

equator_len = 2 * np.pi * 6371

lat = np.zeros(npoints)
lon = np.linspace(lonstart, lonstart + rupture_len / equator_len * 360.,
                  npoints)
tinit = np.linspace(0., rupture_len, npoints) / (rupture_velo * vs)

stf = np.zeros(nts)
stf[1] = 1./dt
stf = lowpass(stf, 1./100., 1./dt)

plt.plot(stf)
plt.show()

f = open('strike_slip_eq.srf', 'w')
f.write('POINTS %d\n' % (npoints,))

for i in np.arange(npoints):
    # lon, lat, dep, stk, dip, area, tinit, dt
    f.write('%11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f\n' %
            (lon[i], lat[i], dep, strike, dip, area, tinit[i], dt))

    # rake, slip1, nt1, slip2, nt2, slip3, nt3
    f.write('%11.5f %11.5f %5d %11.5f %5d %11.5f %5d\n' %
            (rake, slip, nts, 0., 0, 0., 0))
Example #36
0
def process(tr,
            lowcut,
            highcut,
            filt_order,
            samp_rate,
            debug,
            starttime=False,
            clip=False,
            length=86400,
            seisan_chan_names=False,
            ignore_length=False,
            fill_gaps=True):
    """
    Basic function to process data, usually called by dayproc or shortproc.

    Functionally, this will bandpass, downsample and check headers and length
    of trace to ensure files start when they should and are the correct length.
    This is a simple wrapper on obspy functions, we include it here to provide
    a system to ensure all parts of the dataset are processed in the same way.

    .. note:: Usually this function is called via dayproc or shortproc.

    :type tr: obspy.core.trace.Trace
    :param tr: Trace to process
    :type lowcut: float
    :param lowcut: Low cut in Hz, if set to None and highcut is set, will use \
        a lowpass filter.
    :type highcut: float
    :param highcut: High cut in Hz, if set to None and lowcut is set, will \
        use a highpass filter.
    :type filt_order: int
    :param filt_order: Number of corners for filter.
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz.
    :type debug: int
    :param debug: Debug output level from 0-5, higher numbers = more output.
    :type starttime: obspy.core.utcdatetime.UTCDateTime
    :param starttime: Desired start of trace
    :type clip: bool
    :param clip: Whether to expect, and enforce a set length of data or not.
    :type length: float
    :param length: Use to set a fixed length for data from the given starttime.
    :type seisan_chan_names: bool
    :param seisan_chan_names:
        Whether channels are named like seisan channels (which are two letters
        rather than SEED convention of three) - defaults to False.
    :type ignore_length: bool
    :param ignore_length: See warning in dayproc.
    :type fill_gaps: bool
    :param fill_gaps: Whether to pad any gaps found with zeros or not.

    :return: Processed trace.
    :type: :class:`obspy.core.trace.Trace`
    """
    # Add sanity check
    if highcut and highcut >= 0.5 * samp_rate:
        raise IOError('Highcut must be lower than the nyquist')

    # Define the start-time
    if starttime:
        # Be nice and allow a datetime object.
        if isinstance(starttime, dt.date) or isinstance(
                starttime, dt.datetime):
            starttime = UTCDateTime(starttime)
        day = starttime.date
    else:
        day = tr.stats.starttime.date

    debug_print('Working on: ' + tr.stats.station + '.' + tr.stats.channel, 2,
                debug)
    if debug >= 5:
        tr.plot()
    # Check if the trace is gappy and pad if it is.
    gappy = False
    if isinstance(tr.data, np.ma.MaskedArray):
        gappy = True
        gaps, tr = _fill_gaps(tr)
    # Do a brute force quality check
    qual = _check_daylong(tr)
    if not qual:
        msg = ("Data have more zeros than actual data, please check the raw",
               " data set-up and manually sort it: " + tr.stats.station + "." +
               tr.stats.channel)
        raise ValueError(msg)
    tr = tr.detrend('simple')
    # Detrend data before filtering
    debug_print(
        'I have ' + str(len(tr.data)) + ' data points for ' +
        tr.stats.station + '.' + tr.stats.channel + ' before processing', 0,
        debug)

    # Sanity check to ensure files are daylong
    padded = False
    if float(tr.stats.npts / tr.stats.sampling_rate) != length and clip:
        debug_print(
            'Data for ' + tr.stats.station + '.' + tr.stats.channel +
            ' are not of daylong length, will zero pad', 2, debug)
        if tr.stats.endtime - tr.stats.starttime < 0.8 * length\
           and not ignore_length:
            raise NotImplementedError(
                "Data for {0}.{1} is {2} hours long, which is less than 80 "
                "percent of the desired length, will not pad".format(
                    tr.stats.station, tr.stats.channel,
                    (tr.stats.endtime - tr.stats.starttime) / 3600))
        # trim, then calculate length of any pads required
        tr = tr.trim(starttime, starttime + length, nearest_sample=True)
        pre_pad_secs = tr.stats.starttime - starttime
        post_pad_secs = (starttime + length) - tr.stats.endtime
        if pre_pad_secs > 0 or post_pad_secs > 0:
            padded = True
            pre_pad = np.zeros(int(pre_pad_secs * tr.stats.sampling_rate))
            post_pad = np.zeros(int(post_pad_secs * tr.stats.sampling_rate))
            debug_print(str(tr), 2, debug)
            debug_print(
                "Padding to day long with %f s before and %f s at end" %
                (pre_pad_secs, post_pad_secs), 1, debug)
            tr.data = np.concatenate([pre_pad, tr.data, post_pad])
            # Use this rather than the expected pad because of rounding samples
            tr.stats.starttime -= len(pre_pad) * tr.stats.delta
            debug_print(str(tr), 2, debug)
        # If there is one sample too many after this remove the first one
        # by convention
        if len(tr.data) == (length * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * length == tr.stats.npts:
            raise ValueError('Data are not daylong for ' + tr.stats.station +
                             '.' + tr.stats.channel)
        debug_print(
            'I now have %i data points after enforcing length' % len(tr.data),
            0, debug)
    # Check sampling rate and resample
    if tr.stats.sampling_rate != samp_rate:
        debug_print('Resampling', 1, debug)
        tr.resample(samp_rate)
    # Filtering section
    tr = tr.detrend('simple')  # Detrend data again before filtering
    if highcut and lowcut:
        debug_print('Bandpassing', 1, debug)
        tr.data = bandpass(tr.data, lowcut, highcut, tr.stats.sampling_rate,
                           filt_order, True)
    elif highcut:
        debug_print('Lowpassing', 1, debug)
        tr.data = lowpass(tr.data, highcut, tr.stats.sampling_rate, filt_order,
                          True)
    elif lowcut:
        debug_print('Highpassing', 1, debug)
        tr.data = highpass(tr.data, lowcut, tr.stats.sampling_rate, filt_order,
                           True)
    else:
        debug_print('No filters applied', 2, debug)
    # Account for two letter channel names in s-files and therefore templates
    if seisan_chan_names:
        tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]

    # Sanity check the time header
    if tr.stats.starttime.day != day and clip:
        debug_print(
            "Time headers do not match expected date: {0}".format(
                tr.stats.starttime), 2, debug)

    if padded:
        debug_print("Reapplying zero pads post processing", 1, debug)
        debug_print(str(tr), 2, debug)
        pre_pad = np.zeros(int(pre_pad_secs * tr.stats.sampling_rate))
        post_pad = np.zeros(int(post_pad_secs * tr.stats.sampling_rate))
        pre_pad_len = len(pre_pad)
        post_pad_len = len(post_pad)
        debug_print(
            "Taking only valid data between %i and %i samples" %
            (pre_pad_len, len(tr.data) - post_pad_len), 1, debug)
        # Re-apply the pads, taking only the data section that was valid
        tr.data = np.concatenate([
            pre_pad, tr.data[pre_pad_len:len(tr.data) - post_pad_len], post_pad
        ])
        debug_print(str(tr), 2, debug)
    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != length and clip:
        debug_print(
            'Data for ' + tr.stats.station + '.' + tr.stats.channel +
            ' are not of daylong length, will zero pad', 1, debug)
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime,
                     starttime + length,
                     pad=True,
                     fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this remove the last one
        # by convention
        if len(tr.data) == (length * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * length == tr.stats.npts:
            raise ValueError('Data are not daylong for ' + tr.stats.station +
                             '.' + tr.stats.channel)
    # Replace the gaps with zeros
    if gappy:
        tr = _zero_pad_gaps(tr, gaps, fill_gaps=fill_gaps)
    # Final visual check for debug
    if debug > 4:
        tr.plot()
    return tr
Example #37
0
npts = 512  # number of samples
nsec = 4.0  # length of signal in seconds
df = 20.0  # sampling rate
fNy = df / 2.0  # Nyquist frequency
fg1 = 8  # generator frequency 1 (initial: 8 Hz)
fg2 = 4  # generator frequency 2 (initial: 4 Hz)
time = np.linspace(0, nsec, int(nsec * df) + 1)  # time axis for plotting

y = np.sin(2 * np.pi * fg1 * time)  # set up a test signal from two frequencies
y += np.sin(2 * np.pi * fg2 * time)

# downsample to 10 Hz by taking every second element
y_2 = y[::2]

# downsample after lowpassing the signal
y_l = lowpass(y, 5.0, df=df, corners=4, zerophase=False)
y_new = y_l[::2]

y_f = np.fft.rfft(y)  # transform all 3 signals into frequency domain
y_f2 = np.fft.rfft(y_2)  # applying Fourier transformation via FFT
y_fnew = np.fft.rfft(y_l)
freq = np.linspace(0, fNy, len(y_f))  # frequency axis for plotting

# plot
plt.subplot(211)
plt.plot(time, y, 'k', label="Original data", lw=1.5)
plt.plot(time[::2], y_2, 'r--', label="Downsample without lowpass", lw=2)
plt.plot(time[::2], y_new, 'g', label="Downsample with lowpass", lw=2)
plt.legend()
plt.ylim(-2, 4.5)
plt.title('Time Domain')
Example #38
0
def make_stf(dt=0.10, nt=5000, fmin=1.0/100.0, fmax=1.0/8.0, filename='../INPUT/stf_new', plot=True):

	"""
	Generate a source time function for ses3d by applying a bandpass filter to a Heaviside function.

	make_stf(dt=0.13, nt=4000, fmin=1.0/100.0, fmax=1.0/8.0, filename='../INPUT/stf_new', plot=True)

	dt: Length of the time step. Must equal dt in the event_* file.
	nt: Number of time steps. Must be equal to or greater than nt in the event_* file.
	fmin: Minimum frequency of the bandpass.
	fmax: Maximum frequency of the bandpass.
	filename: Output filename.

	"""

	#- Make time axis and original Heaviside function. --------------------------------------------

	t = np.arange(0.0,float(nt+1)*dt,dt)
	h = np.ones(len(t))

	#- Apply filters. -----------------------------------------------------------------------------

	h = flt.highpass(h, fmin, 1.0/dt, 3, zerophase=False)
	h = flt.lowpass(h, fmax, 1.0/dt, 5, zerophase=False)

	#- Plot output. -------------------------------------------------------------------------------

	if plot:

		#- Time domain.

		plt.plot(t,h,'k')
		plt.xlim(0.0,float(nt)*dt)
		plt.xlabel('time [s]')
		plt.title('source time function (time domain)')

		plt.show()

		#- Frequency domain.

		hf = np.fft.fft(h)
		f = np.fft.fftfreq(len(hf), dt)

		plt.semilogx(f,np.abs(hf),'k')
		plt.plot([fmin,fmin],[0.0, np.max(np.abs(hf))],'r--')
		plt.text(1.1*fmin, 0.5*np.max(np.abs(hf)), 'fmin')
		plt.plot([fmax,fmax],[0.0, np.max(np.abs(hf))],'r--')
		plt.text(1.1*fmax, 0.5*np.max(np.abs(hf)), 'fmax')
		plt.xlim(0.1*fmin,10.0*fmax)
		plt.xlabel('frequency [Hz]')
		plt.title('source time function (frequency domain)')

		plt.show()

	#- Write to file. -----------------------------------------------------------------------------

	f = open(filename, 'w')

	#- Header.

	f.write('source time function, ses3d version 4.1\n')
	f.write('nt= '+str(nt)+', dt='+str(dt)+'\n')
	f.write('filtered Heaviside, highpass(fmin='+str(fmin)+', corners=3, zerophase=False), lowpass(fmax='+str(fmax)+', corners=5, zerophase=False)\n')
	f.write('-- samples --\n')

	for k in range(len(h)):
		f.write(str(h[k])+'\n')

	f.close()
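A hypothetical call to the function above (the output filename here is illustrative, not the ses3d default; plotting is disabled to keep it non-interactive):

make_stf(dt=0.10, nt=5000, fmin=1.0 / 100.0, fmax=1.0 / 8.0,
         filename='stf_example', plot=False)

# The first header line written by make_stf should read back verbatim
with open('stf_example') as f:
    print(f.readline().strip())  # 'source time function, ses3d version 4.1'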
Example #39
0
 def lp_sliprate(self, freq, corners=4, zerophase=False):
     self.sliprate = lowpass(self.sliprate, freq, 1./self.dt, corners,
                             zerophase)
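The method above was scraped from a larger class; below is a minimal stand-in to exercise it, with the attribute names (sliprate, dt) taken from the snippet and everything else assumed:

import numpy as np
from obspy.signal.filter import lowpass

class Source:
    """Minimal stand-in for the class lp_sliprate belongs to."""
    def __init__(self, sliprate, dt):
        self.sliprate = np.asarray(sliprate, dtype=float)
        self.dt = dt

    def lp_sliprate(self, freq, corners=4, zerophase=False):
        self.sliprate = lowpass(self.sliprate, freq, 1. / self.dt, corners,
                                zerophase)

src = Source(np.random.randn(1024), dt=0.01)  # 100 Hz sampling (assumed)
src.lp_sliprate(freq=5.0, corners=4, zerophase=True)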
Example #40
0
def process(tr,
            lowcut,
            highcut,
            filt_order,
            samp_rate,
            debug,
            starttime=False,
            full_day=False):
    r"""Basic function to bandpass, downsample and check headers and length \
    of trace to ensure files start at the start of a day and are daylong.

    Works in place on data.  This is employed to ensure all parts of the data \
    are processed in the same way.

    .. note:: Usually this function is called via dayproc or shortproc.

    :type tr: obspy.Trace
    :param tr: Trace to process
    :type highcut: float
    :param highcut: High cut in Hz, if set to None and lowcut is set, will \
        use a highpass filter.
    :type lowcut: float
    :param lowcut: Low cut in Hz, if set to None and highcut is set, will \
        use a lowpass filter.
    :type filt_order: int
    :param filt_order: Number of corners for filter.
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz
    :type debug: int
    :param debug: Debug output level from 0-5, higher numbers = more output
    :type starttime: obspy.UTCDateTime
    :param starttime: Desired start of trace
    :type full_day: bool
    :param full_day: Whether to expect, and enforce a full day of data or not.

    :return: obspy.Trace

    .. note:: Will convert channel names to two characters long.
    """
    import warnings
    from obspy.signal.filter import bandpass, lowpass, highpass
    # Add sanity check
    if highcut and highcut >= 0.5 * samp_rate:
        raise IOError('Highcut must be lower than the Nyquist frequency')
    # Define the start-time
    if starttime:
        day = starttime.date
    else:
        day = tr.stats.starttime.date

    if debug >= 2:
        print('Working on: ' + tr.stats.station + '.' + tr.stats.channel)
    if debug >= 5:
        tr.plot()
    # Do a brute force quality check
    qual = _check_daylong(tr)
    if not qual:
        msg = ("Data have more zeros than actual data, please check the raw",
               " data set-up and manually sort it")
        raise ValueError(msg)
    tr = tr.detrend('simple')  # Detrend data before filtering

    # If there is one sample too many remove the first sample - this occurs
    # at station FOZ where the first sample is zero when it shouldn't be,
    # Not real sample: generated during data download
    # if full_day:
    #     if len(tr.data) == (86400 * tr.stats.sampling_rate) + 1:
    #         tr.data = tr.data[1:len(tr.data)]
    if debug > 0:
        print('I have ' + str(len(tr.data)) + ' data points for ' +
              tr.stats.station + '.' + tr.stats.channel + ' before processing')

    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != 86400.0\
       and full_day:
        if debug >= 2:
            print('Data for ' + tr.stats.station + '.' + tr.stats.channel +
                  ' are not of daylong length, will zero pad')
        # Work out when the trace thinks it is starting
        # traceday = UTCDateTime(str(tr.stats.starttime.year)+'-' +
        #                        str(tr.stats.starttime.month)+'-' +
        #                        str(tr.stats.starttime.day))
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime,
                     starttime + 86400,
                     pad=True,
                     fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this, remove the first
        # sample by convention
        if len(tr.data) == (86400 * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * 86400 == tr.stats.npts:
            raise ValueError('Data are not daylong for ' + tr.stats.station +
                             '.' + tr.stats.channel)

        print('I now have ' + str(len(tr.data)) +
              ' data points after enforcing day length')

    # Check sampling rate and resample
    if tr.stats.sampling_rate != samp_rate:
        if debug >= 2:
            print('Resampling')
        tr.resample(samp_rate)

    # Filtering section
    tr = tr.detrend('simple')  # Detrend data again before filtering
    if highcut and lowcut:
        if debug >= 2:
            print('Bandpassing')
        tr.data = bandpass(tr.data, lowcut, highcut, tr.stats.sampling_rate,
                           filt_order, True)
    elif highcut:
        if debug >= 2:
            print('Lowpassing')
        tr.data = lowpass(tr.data, highcut, tr.stats.sampling_rate, filt_order,
                          True)
    elif lowcut:
        if debug >= 2:
            print('Highpassing')
        tr.data = highpass(tr.data, lowcut, tr.stats.sampling_rate, filt_order,
                           True)
    else:
        warnings.warn('No filters applied')

    # Account for two letter channel names in s-files and therefore templates
    tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]

    # Sanity check the time header
    if tr.stats.starttime.date != day and full_day:
        warnings.warn("Time headers do not match expected date: " +
                      str(tr.stats.starttime))

    # Sanity check to ensure files are daylong
    if float(tr.stats.npts / tr.stats.sampling_rate) != 86400.0 and full_day:
        if debug >= 2:
            print('Data for ' + tr.stats.station + '.' + tr.stats.channel +
                  ' are not of daylong length, will zero pad')
        # Use obspy's trim function with zero padding
        tr = tr.trim(starttime,
                     starttime + 86400,
                     pad=True,
                     fill_value=0,
                     nearest_sample=True)
        # If there is one sample too many after this, remove the first
        # sample by convention
        if len(tr.data) == (86400 * tr.stats.sampling_rate) + 1:
            tr.data = tr.data[1:len(tr.data)]
        if not tr.stats.sampling_rate * 86400 == tr.stats.npts:
            raise ValueError('Data are not daylong for ' + tr.stats.station +
                             '.' + tr.stats.channel)
    # Final visual check for debug
    if debug >= 4:
        tr.plot()
    return tr
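A hypothetical call to process on a synthetic daylong trace. This assumes _check_daylong from the same module is importable; the station and channel names are made up:

import numpy as np
from obspy import Trace, UTCDateTime

t0 = UTCDateTime(2005, 10, 6)
tr = Trace(np.random.randn(86400 * 20))  # daylong trace at 20 Hz
tr.stats.sampling_rate = 20.0
tr.stats.starttime = t0
tr.stats.station = 'FOZ'    # station name reused from the comment above
tr.stats.channel = 'HHZ'

tr = process(tr, lowcut=2.0, highcut=9.0, filt_order=4, samp_rate=20.0,
             debug=0, starttime=t0, full_day=True)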
Example #41
0
time = np.arange(0, len(decon))*dt

M = np.arange(0, len(decon))
N = len(M)

SeD = np.where(np.logical_and(M >= 0, M <= N / 2))
d1 = decon[SeD]

SeD2 = np.where(np.logical_and(M > N / 2, M <= N + 1))
d2 = decon[SeD2]

''' Relative source time function '''
stf = np.concatenate((d2, d1))

'''Cleaning the rSTF from high frequency noise '''
stf = lowpass(stf, 4, fr, corners=4, zerophase=True)
stf /= stf.max()

''' Fourier Transform of the rSTF '''
Cspec, Cfreq = mtspec(stf, delta=dt, time_bandwidth=2,
                      number_of_tapers=3)

m = len(Cspec)
Cspec = Cspec[:m // 2]
Cfreq = Cfreq[:m // 2]

''' Creating figure '''
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.loglog(Cfreq, Cspec, 'grey', linewidth=1.7, label='Spectral ratio')
ax1.set_xlabel("Frequency [Hz]")