Example #1
    def space_integral(self, weights=None):
        # ToDo: have this checked; including spatial sampling!
        # ToDo: Figure out how to assign the metadata.
        trace = Trace()
        trace.stats.sampling_rate = self.stats['Fs']

        # ToDo: Thinking about weights
        if not self.complex:
            if weights is not None:
                trace.data = np.trapz(np.multiply(self.data[:], weights[:]),
                                      axis=0)
            else:
                trace.data = np.trapz(self.data[:], axis=0)
        # ToDo: complex wavefield
        else:
            if weights is not None:
                trace.data_i = np.trapz(np.multiply(self.data_i[:],
                                                    weights[:]),
                                        axis=0)
                trace.data_r = np.trapz(np.multiply(self.data_r[:],
                                                    weights[:]),
                                        axis=0)
            else:
                trace.data_i = np.trapz(self.data_i[:], axis=0)
                trace.data_r = np.trapz(self.data_r[:], axis=0)

        return trace
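
The integral above is a plain trapezoidal rule along the spatial axis. A minimal stand-alone sketch of that step (hypothetical shapes: data as n_space x n_time, one weight per spatial point):

import numpy as np

# Hypothetical wavefield block: 3 spatial points x 5 time samples.
field = np.random.randn(3, 5)
weights = np.array([0.5, 1.0, 0.5])  # hypothetical spatial weights

# Weighted trapezoidal integration over the spatial axis, as in the
# np.trapz(..., axis=0) calls above.
integrated = np.trapz(field * weights[:, np.newaxis], axis=0)
print(integrated.shape)  # (5,) -> one integrated value per time sample
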
Example #2
    def test_align_traces(self):
        """Test the utils.stacking.align_traces function."""
        # Generate synth data
        import numpy as np
        from obspy import Trace

        synth = Trace()
        synth.data = np.zeros(200)
        synth.data[100] = 1.0
        sine_x = np.arange(0, 10.0, 0.5)
        damped_sine = np.exp(-sine_x) * np.sin(2 * np.pi * sine_x)
        synth.data = np.convolve(synth.data, damped_sine)
        # Normalize:
        synth.data = synth.data / synth.data.max()
        # maximum_synth = synth[0].data.max()
        # RMS_max = np.sqrt(np.mean(np.square(synth[0].data)))
        traces = [synth.copy() for i in range(10)]

        shifts, ccs = align_traces(traces, shift_len=2, master=False)
        for shift in shifts:
            self.assertEqual(shift, 0)

        # Force shifts and check shifts recovered
        shifts_in = np.arange(10)
        original_length = len(traces[0].data)
        for shift, tr in zip(shifts_in, traces):
            pad = np.zeros(shift)
            tr.data = np.concatenate([pad, tr.data])[0:original_length]

        shifts, ccs = align_traces(traces, shift_len=11, master=False)
        for shift_in, shift_out in zip(shifts_in, shifts):
            self.assertEqual(-1 * shift_in, shift_out)
Example #3
    def test_detrend(self):
        """
        Test detrend method of trace
        """
        t = np.arange(10)
        data = 0.1 * t + 1.
        tr = Trace(data=data.copy())

        tr.detrend(type='simple')
        np.testing.assert_array_almost_equal(tr.data, np.zeros(10))

        tr.data = data.copy()
        tr.detrend(type='linear')
        np.testing.assert_array_almost_equal(tr.data, np.zeros(10))

        data = np.zeros(10)
        data[3:7] = 1.

        tr.data = data.copy()
        tr.detrend(type='simple')
        np.testing.assert_almost_equal(tr.data[0], 0.)
        np.testing.assert_almost_equal(tr.data[-1], 0.)

        tr.data = data.copy()
        tr.detrend(type='linear')
        np.testing.assert_almost_equal(tr.data[0], -0.4)
        np.testing.assert_almost_equal(tr.data[-1], -0.4)
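
Why the linear detrend of the boxcar ends at -0.4: the boxcar is symmetric about t = 4.5, so the least-squares line is flat at the mean value 0.4, and the end samples (originally 0) become -0.4. A quick NumPy check of that expectation:

import numpy as np

t = np.arange(10)
data = np.zeros(10)
data[3:7] = 1.0

# Least-squares line through the boxcar: slope ~0, intercept ~0.4.
slope, intercept = np.polyfit(t, data, 1)
print(round(slope, 10), round(intercept, 10))  # ~0.0 ~0.4
print(data[0] - (slope * t[0] + intercept))    # ~-0.4, matching the assertion
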
Example #4
    def test_channel_loop(self):
        """Test trigger generation in internal loop."""
        import numpy as np
        from eqcorrscan.utils.trigger import _channel_loop
        from eqcorrscan.utils.trigger import TriggerParameters
        from obspy import Trace

        parameters = [
            TriggerParameters({
                'station': 'TEST',
                'channel': 'SHZ',
                'sta_len': 0.3,
                'lta_len': 10.0,
                'thr_on': 10,
                'thr_off': 3,
                'lowcut': 2,
                'highcut': 20
            })
        ]
        tr = Trace()
        tr.data = np.random.randn(2000)
        tr.data[1000:1010] = [100, -80, 70, -65, 60, -52, 45, -30, 15, 5]
        tr.stats.sampling_rate = 100
        tr.stats.station = parameters[0]['station']
        tr.stats.channel = parameters[0]['channel']
        # Test without despike
        triggers = _channel_loop(tr=tr,
                                 parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False)
        self.assertEqual(len(triggers), 1)
        # Test with despike
        triggers = _channel_loop(tr=tr,
                                 parameters=parameters,
                                 max_trigger_length=100,
                                 despike=True)
        self.assertEqual(len(triggers), 1)
        # Test with no filter
        parameters[0]['lowcut'] = None
        parameters[0]['highcut'] = None
        triggers = _channel_loop(tr=tr,
                                 parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False)
        self.assertEqual(len(triggers), 1)
        # Test with lowpass
        parameters[0]['highcut'] = 20
        triggers = _channel_loop(tr=tr,
                                 parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False)
        self.assertEqual(len(triggers), 1)
        # Test with highpass
        parameters[0]['highcut'] = None
        parameters[0]['lowcut'] = 2
        triggers = _channel_loop(tr=tr,
                                 parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False)
        self.assertEqual(len(triggers), 1)
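
For context, the sta_len/lta_len/thr_on values above describe a short-term/long-term average trigger. A generic illustration using ObsPy's classic_sta_lta (this is only the textbook recipe; _channel_loop's internals may differ):

import numpy as np
from obspy.signal.trigger import classic_sta_lta

data = np.random.randn(2000)
data[1000:1010] = [100, -80, 70, -65, 60, -52, 45, -30, 15, 5]
# 0.3 s STA and 10 s LTA at 100 Hz, as in the parameters above.
cft = classic_sta_lta(data, int(0.3 * 100), int(10.0 * 100))
print(cft.max())  # comfortably above the thr_on value of 10 for this spike
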
Example #5
    def _upsample(self, trace, upfactor):
        """
        Upsample a data stream by a given factor, prior to decimation. The
        upsampling is done using a linear interpolation.

        Parameters
        ----------
        trace : obspy Trace object
            Trace to be upsampled
        upfactor : int
            Factor by which to upsample the data in trace

        Returns
        -------
        out : obspy Trace object
            Upsampled trace

        """

        data = trace.data
        dnew = np.zeros(len(data) * upfactor - (upfactor - 1))
        dnew[::upfactor] = data
        for i in range(1, upfactor):
            dnew[i::upfactor] = float(i) / upfactor * data[1:] \
                         + float(upfactor - i) / upfactor * data[:-1]

        out = Trace()
        out.data = dnew
        out.stats = trace.stats.copy()  # copy so the input trace's stats are not altered below
        out.stats.npts = len(out.data)
        out.stats.starttime = trace.stats.starttime
        out.stats.sampling_rate = int(upfactor * trace.stats.sampling_rate)

        return out
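
The hand-rolled interpolation loop above can be cross-checked against np.interp; the two agree on the interior points (a small sanity check, not part of the original code):

import numpy as np

data = np.array([0.0, 1.0, 4.0, 9.0])
upfactor = 4
new_n = len(data) * upfactor - (upfactor - 1)

dnew = np.zeros(new_n)
dnew[::upfactor] = data
for i in range(1, upfactor):
    dnew[i::upfactor] = (i / upfactor) * data[1:] \
        + ((upfactor - i) / upfactor) * data[:-1]

via_interp = np.interp(np.linspace(0, len(data) - 1, new_n),
                       np.arange(len(data)), data)
print(np.allclose(dnew, via_interp))  # True
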
Example #6
def get_difference(
    directory: Union[str, Path],
    trace: Trace,
) -> Stream:
    '''
    Get the data from the trace sent that does not match
    the data found in the MSCAN directory.  This will result in the data
    that should be written to the MSCAN directory.

    :param str directory: MSCAN directory
    :type trace: :class:`obspy.Trace`
    :param trace: trace data
    :rtype: :class:`obspy.Trace`
    '''
    stored_trace = __get_stored_trace(directory, trace)
    # No trace found in store, all are new
    if stored_trace.stats.npts == 0:
        logging.debug('No trace found in store')
        return trace.split()

    logging.debug(
        f'Trim content of read stream:\n{stored_trace}\nto match:\n{trace}')
    stored_trace = stored_trace.trim(
        trace.stats.starttime,
        trace.stats.endtime,
        pad=True,
    )
    if stored_trace.stats.npts != trace.stats.npts:
        raise RingException("Oops, I don't think I got the trim right!")
    mask = (stored_trace.data - trace.data) == 0
    trace.data = np.ma.array(trace.data, mask=mask)
    return trace.split()
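
The mask-and-split idea above in miniature: samples that already match the store are masked, so Trace.split() returns a Stream holding only the segments that differ (toy data, not the MSCAN store):

import numpy as np
from obspy import Trace

stored = Trace(data=np.array([1.0, 2.0, 3.0, 4.0, 5.0]))
new = Trace(data=np.array([1.0, 2.0, 9.0, 9.0, 5.0]))

mask = (stored.data - new.data) == 0
new.data = np.ma.array(new.data, mask=mask)
print(new.split())  # Stream with a single 2-sample Trace (the changed part)
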
Example #7
    def getAICcf(self, traceID):
        '''
        Returns the Akaike criterion for a trace and the higher order statistics characteristic function.

        :param: traceID
        :type: int

        :param: cut, cut out a part of the trace (t_start, t_end) [s]
        :type: tuple

        :param: t2, size of the moving window [s]
        :type: float

        :param: order, order of the characteristic function
        :type: int
        '''

        st_cf = Stream()
        tr_cf = Trace()
        tr_cf.data = self.getHOScf(traceID).getCF()
        st_cf += tr_cf
        return AICcf(st_cf,
                     self.getCut(),
                     self.getTmovwind(),
                     stealthMode=True)
Example #8
def conv2sac(root, component, datatype, conv_format):

    fpattern = 'seismo.' + component + '.*.' + 'sd' + datatype

    chan_conv = dict(X='1', Y='2', Z='3', R='R', T='T')

    files = glob.glob1(root, fpattern)

    for fn in files:
        data = np.loadtxt(os.path.join(root, fn))  # glob1 returns names relative to root
        chan, station = fn.split('.')[1].strip().upper(), fn.split(
            '.')[2].strip()

        time = data[:, 0]
        stime = UTCDateTime(time[0])
        delta = time[1] - time[0]
        freq = 1 / delta
        data = data[:, 1]
        tr = Trace()
        tr.data = data
        tr.stats.station = station
        tr.stats.channel = chan_conv[chan]
        tr.stats.starttime = stime
        tr.stats.delta = delta
        tr.stats.sampling_rate = freq

        outfile = '{0}.{1}.{2}'.format(station, tr.stats.channel,
                                       conv_format.lower())
        tr.write(outfile, format=conv_format)
Example #9
def test_windows():

    array1 = np.zeros(7, dtype=int)
    array2 = np.zeros(8, dtype=int)
    array1[3] = 1
    array2[4] = 1

    assert (my_centered(array1, 5))[2] == 1
    assert (my_centered(array2, 5))[2] == 1
    assert (my_centered(array1, 9))[4] == 1
    assert (my_centered(array2, 9))[4] == 1

    tr = Trace()
    tr.stats.sac = {}
    tr.stats.sac['dist'] = 3.0
    tr.data = my_centered(array1, 15) + 1
    params = {}
    params['hw'] = 1
    params['sep_noise'] = 0
    params['win_overlap'] = True
    params['wtype'] = 'hann'
    params['causal_side'] = True
    win = get_window(tr.stats, g_speed=1.0, params=params)
    assert (len(win) == 3)
    assert (pytest.approx(win[0][10]) == 1.0)

    snr = snratio(tr, g_speed=1.0, window_params=params)
    assert (int(snr) == 1)
Example #10
def chirp_signal(time, fmin, fmax, dt):
    """
	Chirp signal
	"""

    w1 = chirp(time,
               f0=fmax,
               f1=fmin,
               t1=time[-1],
               method='quadratic',
               vertex_zero=False)

    f_w1 = fmin - (fmin - fmax) * (time[-1] - time)**2 / time[-1]**2

    fmin = fmin
    fmax = fmax + 5
    w2 = chirp(time,
               f0=fmax,
               f1=fmin,
               t1=time[-1],
               method='quadratic',
               vertex_zero=False)

    f_w2 = fmin - (fmin - fmax) * (time[-1] - time)**2 / time[-1]**2

    sig = w1 + w2[::-1]
    tr = Trace()
    tr.data = sig
    tr.stats.delta = dt
    tr.stats.starttime = 0
    tr.normalize()

    return tr
Example #11
def read_smc_file(acc_file):
    # reads the "smc" file (synthetic accelerogram generated by smsim)
    # outputs the displacement as obspy trace object in cm format

    f = open(acc_file, 'r')
    lines = f.readlines()[40:]
    f.close()

    acc = []
    for ln in lines:
        for i in range(0, int(len(ln) / 10)):
            acc.append(float(ln[10 * i:10 * (i + 1)]))

    acc = array(acc)
    l = len(acc) * 8
    #acc = reshape(acc, (1, l))[0]

    acc_tr = Trace()
    acc_tr.data = acc  # in cm
    acc_tr.stats.sampling_rate = 100.
    acc_tr.stats.delta = 0.01

    vel_trace = acc_tr.copy().integrate()  # in cm/s

    # get max vel
    pgv = max(abs(vel_trace.data))
    return vel_trace, pgv
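
The acceleration-to-velocity step relies on Trace.integrate(); a self-contained sketch with a synthetic record (1 Hz sine in cm/s^2, 100 Hz sampling assumed, values illustrative only):

import numpy as np
from obspy import Trace

acc_tr = Trace(data=np.sin(2 * np.pi * 1.0 * np.arange(0.0, 10.0, 0.01)))
acc_tr.stats.delta = 0.01
vel_trace = acc_tr.copy().integrate()  # cumulative trapezoidal integration
pgv = max(abs(vel_trace.data))
print(pgv)  # peak velocity of the synthetic trace
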
Example #12
    def _add_trace_to_buffer(self, trace: Trace):
        """
        Add a trace to the buffer.

        Parameters
        ----------
        trace
            Trace to add to the internal buffer
        """

        with self.lock:
            Logger.debug(f"Adding data: Lock status: {self.lock}")
            try:
                self.buffer.add_stream(trace)
            except Exception as e:
                Logger.error(f"Could not add {trace} to buffer due to {e}")
            self.buffer_full = self.buffer.is_full()
        if trace.data.dtype == np.int32 and trace.data.dtype.type != np.int32:
            # Cope with a windows error where data come in as
            # "int32" not np.int32. See https://github.com/obspy/obspy/issues/2683
            trace.data = trace.data.astype(np.int32)
        Logger.debug("Buffer contains {0}".format(self.buffer))
        Logger.debug(f"Finished adding data: Lock status: {self.lock}")
        Logger.debug(f"Buffer stream: \n{self.buffer.stream}")
        self.stream = self.buffer.stream
        Logger.debug(f"Stream: \n{self.stream}")
Example #13
    def test_sac_instrument_correction(self):
        # SAC recommends to taper the transfer function if a pure
        # deconvolution is done instead of simulating a different
        # instrument. This test checks the difference between the
        # result from removing the instrument response using SAC or
        # ObsPy. Visual inspection shows that the traces are pretty
        # much identical but differences remain (rms ~ 0.042). Haven't
        # found the cause for those, yet. One possible reason is the
        # floating point arithmetic of SAC vs. the double precision
        # arithmetic of Python. However differences still seem to be
        # too big for that.
        pzf = os.path.join(self.path, 'SAC_PZs_KARC_BHZ')
        sacf = os.path.join(self.path, 'KARC.LHZ.SAC.asc.gz')
        testsacf = os.path.join(self.path, 'KARC_corrected.sac.asc.gz')
        plow = 160.
        phigh = 4.
        fl1 = 1.0 / (plow + 0.0625 * plow)
        fl2 = 1.0 / plow
        fl3 = 1.0 / phigh
        fl4 = 1.0 / (phigh - 0.25 * phigh)
        # Uncomment the following to run the sac-commands
        # that created the testing file
        # if 1:
        #    import subprocess as sp
        #    p = sp.Popen('sac',shell=True,stdin=sp.PIPE)
        #    cd1 = p.stdin
        #    print("r %s"%sacf, file=cd1)
        #    print("rmean", file=cd1)
        #    print("rtrend", file=cd1)
        #    print("taper type cosine width 0.03", file=cd1)
        #    print("transfer from polezero subtype %s to none \
        #    freqlimits %f %f %f %f" % (pzf, fl1, fl2, fl3, fl4), file=cd1)
        #    print("w over ./data/KARC_corrected.sac", file=cd1)
        #    print("quit", file=cd1)
        #    cd1.close()
        #    p.wait()

        stats = {'network': 'KA', 'delta': 0.99999988079072466,
                 'station': 'KARC', 'location': 'S1',
                 'starttime': UTCDateTime(2001, 2, 13, 0, 0, 0, 993700),
                 'calib': 1.00868e+09, 'channel': 'BHZ'}
        with gzip.open(sacf) as f:
            tr = Trace(np.loadtxt(f), stats)

        attach_paz(tr, pzf, tovel=False)
        tr.data = simulate_seismometer(
            tr.data, tr.stats.sampling_rate, paz_remove=tr.stats.paz,
            remove_sensitivity=False, pre_filt=(fl1, fl2, fl3, fl4))

        with gzip.open(testsacf) as f:
            data = np.loadtxt(f)

        # import matplotlib.pyplot as plt
        # plt.plot(tr.data)
        # plt.plot(data)
        # plt.show()
        rms = np.sqrt(np.sum((tr.data - data) ** 2) /
                      np.sum(tr.data ** 2))
        self.assertTrue(rms < 0.0421)
Example #14
    def test_SacInstCorrection(self):
        # SAC recommends to taper the transfer function if a pure
        # deconvolution is done instead of simulating a different
        # instrument. This test checks the difference between the
        # result from removing the instrument response using SAC or
        # ObsPy. Visual inspection shows that the traces are pretty
        # much identical but differences remain (rms ~ 0.042). Haven't
        # found the cause for those, yet. One possible reason is the
        # floating point arithmetic of SAC vs. the double precision
        # arithmetic of Python. However differences still seem to be
        # too big for that.
        pzf = os.path.join(self.path, 'SAC_PZs_KARC_BHZ')
        sacf = os.path.join(self.path, 'KARC.LHZ.SAC.asc.gz')
        testsacf = os.path.join(self.path, 'KARC_corrected.sac.asc.gz')
        plow = 160.
        phigh = 4.
        fl1 = 1.0 / (plow + 0.0625 * plow)
        fl2 = 1.0 / plow
        fl3 = 1.0 / phigh
        fl4 = 1.0 / (phigh - 0.25 * phigh)
        #Uncomment the following to run the sac-commands
        #that created the testing file
        #if 1:
        #    import subprocess as sp
        #    p = sp.Popen('sac',shell=True,stdin=sp.PIPE)
        #    cd1 = p.stdin
        #    print >>cd1, "r %s"%sacf
        #    print >>cd1, "rmean"
        #    print >>cd1, "rtrend"
        #    print >>cd1, "taper type cosine width 0.03"
        #    print >>cd1, "transfer from polezero subtype %s to none \
        #    freqlimits %f %f %f %f" % (pzf, fl1, fl2, fl3, fl4)
        #    print >>cd1, "w over ./data/KARC_corrected.sac"
        #    print >>cd1, "quit"
        #    cd1.close()
        #    p.wait()

        stats = {'network': 'KA', 'delta': 0.99999988079072466,
                 'station': 'KARC', 'location': 'S1',
                 'starttime': UTCDateTime(2001, 2, 13, 0, 0, 0, 993700),
                 'calib': 1.00868e+09, 'channel': 'BHZ'}
        tr = Trace(np.loadtxt(sacf), stats)

        attach_paz(tr, pzf, tovel=False)
        tr.data = seisSim(tr.data, tr.stats.sampling_rate,
                          paz_remove=tr.stats.paz, remove_sensitivity=False,
                          pre_filt=(fl1, fl2, fl3, fl4))

        data = np.loadtxt(testsacf)

        # import matplotlib.pyplot as plt
        # plt.plot(tr.data)
        # plt.plot(data)
        # plt.show()
        rms = np.sqrt(np.sum((tr.data - data) ** 2) /
                      np.sum(tr.data ** 2))
        self.assertTrue(rms < 0.0421)
Example #15
def _buffer_write_mseed3_trace(
    handle: io.RawIOBase,
    trace: obspy.Trace,
    max_record_length: int,
    encoding: typing.Optional[utils.Encoding] = None,
    publication_version: typing.Optional[int] = None,
    record_level_flags: typing.Optional[utils.RecordFlag] = None,
    record_level_extra_data: typing.Optional[typing.Dict] = None,
    verbose: typing.Union[bool, int] = False,
) -> None:

    encoding = utils._get_or_check_encoding(data=trace.data, encoding=encoding)

    # The only case in which we'll convert data types is for int16.
    if trace.data.dtype == np.int16:
        trace = trace.copy()
        trace.data = np.require(trace.data, dtype=np.int32)

    ms_record = _trace_to_ms_record(
        trace=trace,
        publication_version=publication_version,
        record_level_flags=record_level_flags,
        record_level_extra_data=record_level_extra_data,
        record_length=max_record_length,
        encoding=encoding,
    )

    packed_samples = C.c_longlong(0)

    # Callback function for mstl3_pack to actually write the file
    def record_handler(record, reclen, _stream):
        handle.write(record[0:reclen])

    # Define Python callback function for use in C function
    rec_handler = C.CFUNCTYPE(None, C.POINTER(C.c_char), C.c_int,
                              C.c_void_p)(record_handler)

    utils._lib.msr3_pack(
        C.pointer(ms_record),
        rec_handler,
        # Pointer passed to the callback function.
        C.c_void_p(),
        # The number of packed samples - returned to the caller.
        C.pointer(packed_samples),
        # flags. Always flush the data - seems to be what we want in ObsPy.
        utils._MSF_FLUSHDATA,
        # verbose,
        utils._verbosity_to_int(verbose),
    )

    # Assure all samples have been packed.
    if packed_samples.value != trace.stats.npts:
        msg = (f"Only {packed_samples.value} samples out of "
               f"{trace.stats.npts} samples have been packed.")
        raise ValueError(msg)
Example #16
def trace_to_spectrum_df(
    trace: obspy.Trace,
    motion_type: str,
    freq_count: Optional[int] = None,
    taper_buffer: float = 0.15,
    min_length: int = 20,
    **kwargs,
) -> pd.DataFrame:
    """
    Convert a trace to a spectrum dataframe.

    The trace's response should have been removed, and the motion type must
    be provided.

    Parameters
    ----------
    trace
        Trace containing the data
    motion_type
        Either acceleration, velocity, or displacement
    freq_count
        If not None, the number of frequencies the dataframe will have
        between 0 and the Nyquist frequency. If less than the Nyquist the
        spectrum will be trimmed from the end; if greater it will be
        zero-padded. The actual number of frequencies will be freq_count + 1
        due to the zero frequency.
    taper_buffer
        The amount to buffer each trace on each end.
    min_length
        The minimum number of samples that should be in the output df, else
        raise
    """
    assert motion_type == "velocity", "only velocity supported for now"
    # trim from beginning to freq_count * 2 if needed
    trace = trace.copy()  # don't mutate the data!
    if freq_count and freq_count < len(trace.data) / 2:
        trace.data = trace.data[:freq_count * 2]
    tr_dict = _get_all_motion_types(trace, motion_type=motion_type)
    tr_dict = _prefft(tr_dict,
                      taper_buffer=taper_buffer,
                      freq_count=freq_count)
    # get sampling rate and ensure all traces are the same length
    sr = tr_dict["velocity"].stats.sampling_rate
    lens = list({len(tr.data) for tr in tr_dict.values()})
    assert len(lens) == 1, "all traces should have the same length"
    # create dataframe and return
    ar = np.vstack([np.fft.rfft(x.data) for x in tr_dict.values()])
    freqs = np.fft.rfftfreq(lens[0], 1.0 / sr)
    df = pd.DataFrame(ar.T, index=freqs, columns=list(MOTION_TYPES))
    if len(df) < min_length:
        msg = (f"trace from {trace.id} is {len(df)} samples but {min_length}"
               f" are required!")
        raise DataQualityError(msg)
    return df
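
The core spectral step above is a real FFT per motion type plus a matching frequency axis; stripped to the essentials (a 100 Hz sampling rate is assumed here):

import numpy as np

sr = 100.0
data = np.random.randn(256)
spectrum = np.fft.rfft(data)
freqs = np.fft.rfftfreq(len(data), d=1.0 / sr)
print(spectrum.shape, freqs[-1])  # (129,) 50.0 -> rfft length and the Nyquist frequency
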
Example #17
def test_check_trace_consistent():
    tr1 = Trace()
    tr1.data = np.zeros(10)
    tr2 = Trace()
    tr2.data = np.zeros(10)
    assert util.check_trace_consistent(tr1, tr2) is None

    tr2.data = np.zeros(20)
    assert util.check_trace_consistent(tr1, tr2) is None
    with pytest.raises(ValueError):
        util.check_trace_consistent(tr1, tr2, mode="full")

    tr2.stats.delta = 2 * tr1.stats.delta
    with pytest.raises(ValueError):
        util.check_trace_consistent(tr1, tr2, mode="")

    tr2.stats.delta = tr1.stats.delta
    tr2.stats.starttime = tr1.stats.starttime + 1
    with pytest.raises(ValueError):
        util.check_trace_consistent(tr1, tr2, mode="test")
Example #18
 def test_writeSACXYWithMinimumStats(self):
     """
     Write SACXY with minimal stats header, not inherited from a SAC file
     """
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, 'SACXY')
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEqual(st[0].stats.delta, 0.01)
     self.assertEqual(st[0].stats.sampling_rate, 100.0)
Example #19
 def test_write_sac_xy_with_minimum_stats(self):
     """
     Write SACXY with minimal stats header, not inherited from a SAC file
     """
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     with NamedTemporaryFile() as tf:
         sac_file = tf.name
         tr.write(sac_file, 'SACXY')
         st = read(sac_file)
     self.assertEqual(st[0].stats.delta, 0.01)
     self.assertEqual(st[0].stats.sampling_rate, 100.0)
Example #20
    def test_channel_loop(self):
        """Test trigger generation in internal loop."""
        import numpy as np
        from eqcorrscan.utils.trigger import _channel_loop
        from eqcorrscan.utils.trigger import TriggerParameters
        from obspy import Trace

        parameters = [TriggerParameters({'station': 'TEST',
                                         'channel': 'SHZ',
                                         'sta_len': 0.3,
                                         'lta_len': 10.0,
                                         'thr_on': 10,
                                         'thr_off': 3,
                                         'lowcut': 2,
                                         'highcut': 20})]
        tr = Trace()
        tr.data = np.random.randn(2000)
        tr.data[1000:1010] = [100, -80, 70, -65, 60, -52, 45, -30, 15, 5]
        tr.stats.sampling_rate = 100
        tr.stats.station = parameters[0]['station']
        tr.stats.channel = parameters[0]['channel']
        # Test without despike
        triggers = _channel_loop(tr=tr, parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False, debug=0)
        self.assertEqual(len(triggers), 1)
        # Test with despike
        triggers = _channel_loop(tr=tr, parameters=parameters,
                                 max_trigger_length=100,
                                 despike=True, debug=0)
        self.assertEqual(len(triggers), 1)
        # Test with no filter
        parameters[0]['lowcut'] = None
        parameters[0]['highcut'] = None
        triggers = _channel_loop(tr=tr, parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False, debug=0)
        self.assertEqual(len(triggers), 1)
        # Test with lowpass
        parameters[0]['highcut'] = 20
        triggers = _channel_loop(tr=tr, parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False, debug=0)
        self.assertEqual(len(triggers), 1)
        # Test with highpass
        parameters[0]['highcut'] = None
        parameters[0]['lowcut'] = 2
        triggers = _channel_loop(tr=tr, parameters=parameters,
                                 max_trigger_length=100,
                                 despike=False, debug=0)
        self.assertEqual(len(triggers), 1)
Example #21
 def test_times(self):
     """
     Test if the correct times array is returned for normal traces and
     traces with gaps.
     """
     tr = Trace(data=np.ones(100))
     tr.stats.sampling_rate = 20
     start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
     tr.stats.starttime = start
     tm = tr.times()
     self.assertAlmostEqual(tm[-1], tr.stats.endtime - tr.stats.starttime)
     tr.data = np.ma.ones(100)
     tr.data[30:40] = np.ma.masked
     tm = tr.times()
     self.assertTrue(np.all(tr.data.mask == tm.mask))
Example #22
 def test_issue_156(self):
     """
     Test case for issue #156.
     """
     # 1
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     with NamedTemporaryFile() as tf:
         sac_file = tf.name
         tr.write(sac_file, 'SAC')
         st = read(sac_file)
     self.assertEqual(st[0].stats.delta, 0.01)
     self.assertEqual(st[0].stats.sampling_rate, 100.0)
     # 2
     tr = Trace()
     tr.stats.delta = 0.005
     tr.data = np.arange(0, 2000)
     with NamedTemporaryFile() as tf:
         sac_file = tf.name
         tr.write(sac_file, 'SAC')
         st = read(sac_file)
     self.assertEqual(st[0].stats.delta, 0.005)
     self.assertEqual(st[0].stats.sampling_rate, 200.0)
Example #23
 def test_times(self):
     """
     Test if the correct times array is returned for normal traces and
     traces with gaps.
     """
     tr = Trace(data=np.ones(100))
     tr.stats.sampling_rate = 20
     start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
     tr.stats.starttime = start
     tm = tr.times()
     self.assertAlmostEqual(tm[-1], tr.stats.endtime - tr.stats.starttime)
     tr.data = np.ma.ones(100)
     tr.data[30:40] = np.ma.masked
     tm = tr.times()
     self.assertTrue(np.all(tr.data.mask == tm.mask))
Example #24
def _convert_adj_to_trace(adj):
    """
    Convert AdjointSource to Trace, for internal use only
    """

    tr = Trace()
    tr.data = adj.adjoint_source
    tr.stats.starttime = adj.starttime
    tr.stats.delta = adj.dt

    tr.stats.channel = adj.component
    tr.stats.station = adj.station
    tr.stats.network = adj.network
    tr.stats.location = adj.location

    return tr
Example #25
def MakeGcfTrace(gcf_block):
    tr = Trace()
    tr.data = gcf_block.data
    tr.stats.sampling_rate = gcf_block.header['sample_rate']
    tr.stats.starttime = GuralpToUtcTime(gcf_block.header['g_day'],
                                         gcf_block.header['g_sec'])

    # NOT TESTED:
    if gcf_block.header['sample_rate'] in _special_tfod:
        offset = gcf_block.header['tfon'] / \
            _special_tfod[gcf_block.header['sample_rate']]
        tr.stats.starttime += offset

    tr.stats.station = gcf_block.header['systemId']

    return tr
Example #26
def _convert_adj_to_trace(adj, starttime, chan_id):
    """
    Convert AdjointSource to Trace, for internal use only
    """

    tr = Trace()
    tr.data = adj.adjoint_source
    tr.stats.starttime = starttime
    tr.stats.delta = adj.dt

    tr.stats.channel = str(chan_id.split(".")[-1])
    tr.stats.station = adj.station
    tr.stats.network = adj.network
    tr.stats.location = chan_id.split(".")[2]

    return tr
Example #27
def continuous_signal(time, low, high, dt):
    """
    Continuous source-signal
    """

    sig = (np.random.rand(time.size) * 2 - 1)

    tr = Trace()
    tr.data = sig
    tr.stats.delta = dt
    tr.stats.starttime = 0
    tr.filter('bandpass', freqmin=low, freqmax=high, corners=4)

    tr.normalize()

    return tr
Example #28
def read_segd(filename):
    fp = open(filename, 'rb')
    generalh = _read_ghb1(fp)
    generalh.update(_read_ghb2(fp))
    generalh.update(_read_ghb3(fp))
    sch = {}
    for n in range(generalh['n_channel_sets_per_record']):
        try:
            _sch = _read_sch(fp)
        except SEGDScanTypeError:
            continue
        sch[_sch['channel_set_number']] = _sch
    size = generalh['extended_header_length']*32
    extdh = _read_extdh(fp, size)
    ext_hdr_lng = generalh['external_header_length']
    if ext_hdr_lng == 0xFF:
        ext_hdr_lng = generalh['external_header_blocks']
    size = ext_hdr_lng*32
    extrh = _read_extrh(fp, size)
    sample_rate = extdh['sample_rate_in_us']/1e6
    npts = extdh['number_of_samples_in_trace']
    size = npts
    st = Stream()
    convert_to_int = True
    for n in range(extdh['total_number_of_traces']):
        traceh, data = _read_trace_data_block(fp, size)
        # check if all traces can be converted to int
        convert_to_int = convert_to_int and np.all(np.mod(data, 1) == 0)
        # _print_dict(traceh, '***TRACEH:')
        tr = Trace(data)
        tr.stats.station = str(traceh['unit_serial_number'])
        tr.stats.channel = _band_code(1./sample_rate)
        tr.stats.channel += _instrument_orientation_code[traceh['sensor_code']]
        tr.stats.delta = sample_rate
        tr.stats.starttime = generalh['time']
        tr.stats.segd = _build_segd_header(generalh, sch, extdh, extrh, traceh)
        st.append(tr)
    fp.close()
    # for n, _sch in sch.iteritems():
    #     _print_dict(_sch, '***SCH %d:' % n)
    # _print_dict(extdh, '***EXTDH:')
    # print('***EXTRH:\n %s' % extrh)
    # _print_dict(generalh, '***GENERALH:')
    if convert_to_int:
        for tr in st:
            tr.data = tr.data.astype(np.int32)
    return st
Example #29
def sine_sum_signal(time, fmin, n, dt):
    """
    Continuous source-signal
    """
    n = n + 1
    sig = np.sin(2 * pi * fmin * (time))
    for i in range(2, n):
        sig += np.sin(2 * pi * fmin * (i * 0.8) * (time))

    # sig = sig + (np.random.rand(time.size) * 2 - 1)*1 #*np.exp(-0.05*time) + (np.random.rand(time.size) * 2 - 1)*1e-2

    tr = Trace()
    tr.data = sig
    tr.stats.delta = dt
    tr.stats.starttime = 0
    tr.normalize()

    return tr
Example #30
    def json2mseed(self, json_local_path, mseed_local_path):

        misal_thresh = self.params["misal_thresh"]
        interp_samp = self.params["interp_samp"]
        network = self.params["network"]

        # create new empty data stream
        st = Stream()

        with open(json_local_path) as f:

            for entry in f:

                record = entry.strip()
                record = ast.literal_eval(record)

                tr = Trace()
                tr.stats.sampling_rate = record["sr"]
                tr.stats.starttime = (UTCDateTime(record["cloud_t"]) -
                                      (len(record["x"]) - 1) / record["sr"])
                tr.stats.station = record["device_id"]
                tr.stats.network = network

                if len(tr.stats.station) > 4:
                    warn("Station name for {} now {} to fit MSEED format".
                         format(tr.stats.station, tr.stats.station[0:4]))
                    tr.stats.station = record["device_id"][0:4]

                for channel in ["x", "y", "z"]:

                    tr.data = np.array(record[channel])
                    tr.stats.channel = "EN" + channel.capitalize()
                    st += tr.copy()

        # align subsample shifts
        st.merge(method=-1, misalignment_threshold=misal_thresh)

        # close overlaps (either discarding or interpolating the overlapping samples)
        st.merge(method=1, fill_value=None, interpolation_samples=interp_samp)
        st = st.split()  # do not return a masked array

        st.write(mseed_local_path, format="MSEED")
Example #31
def add_metadata_and_write(correlation, sta1, sta2, output_file, Fs):
    # save output
    trace = Trace()
    trace.stats.sampling_rate = Fs
    trace.data = correlation
    # try to add some meta data
    try:
        trace.stats.station = sta1.split('.')[1]
        trace.stats.network = sta1.split('.')[0]
        trace.stats.location = sta1.split('.')[2]
        trace.stats.channel = sta1.split('.')[3]
        trace.stats.sac = {}
        trace.stats.sac['kuser0'] = sta2.split('.')[1]
        trace.stats.sac['kuser1'] = sta2.split('.')[0]
        trace.stats.sac['kuser2'] = sta2.split('.')[2]
        trace.stats.sac['kevnm'] = sta2.split('.')[3]
    except (KeyError, IndexError):
        pass

    trace.write(filename=output_file, format='SAC')
    return()
Example #32
    def _create_stream(self, starttime, endtime, sampling_rate):
        """
        Helper method to create a Stream object that can be used for testing
        waveform plotting.

        Takes the time frame of the Stream to be created and a sampling rate.
        Any other header information will have to be adjusted on a case by case
        basis. Please remember to use the same sampling rate for one Trace as
        merging and plotting will not work otherwise.

        This method will create a single sine curve to a first approximation
        with superimposed 10 smaller sine curves on it.

        :return: Stream object
        """
        time_delta = endtime - starttime
        number_of_samples = int(time_delta * sampling_rate) + 1
        # Calculate first sine wave.
        curve = np.linspace(0, 2 * np.pi, number_of_samples // 2)
        # Superimpose it with a smaller but shorter wavelength sine wave.
        curve = np.sin(curve) + 0.2 * np.sin(10 * curve)
        # To get a thick curve alternate between two curves.
        data = np.empty(number_of_samples)
        # Check if even number and adjust if necessary.
        if number_of_samples % 2 == 0:
            data[0::2] = curve
            data[1::2] = curve + 0.2
        else:
            data[-1] = 0.0
            data[0:-1][0::2] = curve
            data[0:-1][1::2] = curve + 0.2
        tr = Trace()
        tr.stats.starttime = starttime
        tr.stats.sampling_rate = float(sampling_rate)
        # Fill dummy header.
        tr.stats.network = 'BW'
        tr.stats.station = 'OBSPY'
        tr.stats.channel = 'TEST'
        tr.data = data
        return Stream(traces=[tr])
Example #33
    def _createStream(self, starttime, endtime, sampling_rate):
        """
        Helper method to create a Stream object that can be used for testing
        waveform plotting.

        Takes the time frame of the Stream to be created and a sampling rate.
        Any other header information will have to be adjusted on a case by case
        basis. Please remember to use the same sampling rate for one Trace as
        merging and plotting will not work otherwise.

        This method will create a single sine curve to a first approximation
        with superimposed 10 smaller sine curves on it.

        :return: Stream object
        """
        time_delta = endtime - starttime
        number_of_samples = int(time_delta * sampling_rate) + 1
        # Calculate first sine wave.
        curve = np.linspace(0, 2 * np.pi, int(number_of_samples // 2))
        # Superimpose it with a smaller but shorter wavelength sine wave.
        curve = np.sin(curve) + 0.2 * np.sin(10 * curve)
        # To get a thick curve alternate between two curves.
        data = np.empty(number_of_samples)
        # Check if even number and adjust if necessary.
        if number_of_samples % 2 == 0:
            data[0::2] = curve
            data[1::2] = curve + 0.2
        else:
            data[-1] = 0.0
            data[0:-1][0::2] = curve
            data[0:-1][1::2] = curve + 0.2
        tr = Trace()
        tr.stats.starttime = starttime
        tr.stats.sampling_rate = float(sampling_rate)
        # Fill dummy header.
        tr.stats.network = 'BW'
        tr.stats.station = 'OBSPY'
        tr.stats.channel = 'TEST'
        tr.data = data
        return Stream(traces=[tr])
Example #34
def _convert_adj_to_trace(adj):
    """
    Convert AdjointSource to Trace, for internal use only
    """
    meta = {}

    tr = Trace()
    tr.data = adj.adjoint_source
    tr.stats.starttime = adj.starttime
    tr.stats.delta = adj.dt

    tr.stats.channel = adj.component
    tr.stats.station = adj.station
    tr.stats.network = adj.network
    tr.stats.location = adj.location

    meta["adj_src_type"] = adj.adj_src_type
    meta["misfit"] = adj.misfit
    meta["min_period"] = adj.min_period
    meta["max_period"] = adj.max_period

    return tr, meta
Example #35
def convert_adj_to_trace(adj):
    """
    Convert AdjointSource to Trace, for internal use only
    """
    meta = {}

    tr = Trace()
    tr.data = deepcopy(adj.adjoint_source)
    tr.stats.starttime = adj.starttime
    tr.stats.delta = adj.dt

    tr.stats.channel = adj.component
    tr.stats.station = adj.station
    tr.stats.network = adj.network
    tr.stats.location = adj.location

    meta["adj_src_type"] = adj.adj_src_type
    meta["misfit"] = adj.misfit
    meta["min_period"] = adj.min_period
    meta["max_period"] = adj.max_period

    return tr, meta
Example #36
def match_filter(template_names, template_list, st, threshold,
                 threshold_type, trig_int, plotvar, plotdir='.', cores=1,
                 tempdir=False, debug=0, plot_format='png',
                 output_cat=False, extract_detections=False,
                 arg_check=True):
    """
    Main matched-filter detection function.
    Over-arching code to run the correlations of given templates with a \
    day of seismic data and output the detections based on a given threshold.
    For a functional example see the tutorials.

    :type template_names: list
    :param template_names: List of template names in the same order as \
        template_list
    :type template_list: list
    :param template_list: A list of templates of which each template is a \
        Stream of obspy traces containing seismic data and header information.
    :type st: obspy.core.stream.Stream
    :param st: An obspy.Stream object containing all the data available and \
        required for the correlations with templates given.  For efficiency \
        this should contain no excess traces which are not in one or more of \
        the templates.  This will now remove excess traces internally, but \
        will copy the stream and work on the copy, leaving your input stream \
        untouched.
    :type threshold: float
    :param threshold: A threshold value set based on the threshold_type
    :type threshold_type: str
    :param threshold_type: The type of threshold to be used, can be MAD, \
        absolute or av_chan_corr.    MAD threshold is calculated as the \
        threshold*(median(abs(cccsum))) where cccsum is the cross-correlation \
        sum for a given template. absolute threshold is a true absolute \
        threshold based on the cccsum value av_chan_corr is based on the mean \
        values of single-channel cross-correlations assuming all data are \
        present as required for the template, \
        e.g. av_chan_corr_thresh=threshold*(cccsum/len(template)) where \
        template is a single template from the input and the length is the \
        number of channels within this template.
    :type trig_int: float
    :param trig_int: Minimum gap between detections in seconds.
    :type plotvar: bool
    :param plotvar: Turn plotting on or off
    :type plotdir: str
    :param plotdir: Path to plotting folder, plots will be output here, \
        defaults to run location.
    :type tempdir: str
    :param tempdir: Directory to put temporary files, or False
    :type cores: int
    :param cores: Number of cores to use
    :type debug: int
    :param debug: Debug output level, the bigger the number, the more the \
        output.
    :type plot_format: str
    :param plot_format: Specify format of output plots if saved
    :type output_cat: bool
    :param output_cat: Specifies if matched_filter will output an \
        obspy.Catalog class containing events for each detection. Default \
        is False, in which case matched_filter will output a list of \
        detection classes, as normal.
    :type extract_detections: bool
    :param extract_detections: Specifies whether or not to return a list of \
        streams, one stream per detection.
    :type arg_check: bool
    :param arg_check: Check arguments, defaults to True, but if running in \
        bulk, and you are certain of your arguments, then set to False.

    :return: :class: 'DETECTIONS' detections for each channel formatted as \
        :class: 'obspy.UTCDateTime' objects.
    :return: :class: obspy.Catalog containing events for each detection.
    :return: list of :class: obspy.Stream objects for each detection.

    .. note:: Plotting within the match-filter routine uses the Agg backend \
        with interactive plotting turned off.  This is because the function \
        is designed to work in bulk.  If you wish to turn interactive \
        plotting on you must import matplotlib in your script first, when you \
        then import match_filter you will get the warning that this call to \
        matplotlib has no effect, which will mean that match_filter has not \
        changed the plotting behaviour.

    .. note:: The output_cat flag will create an :class: obspy.Catalog \
        containing one event for each :class: 'DETECTIONS' generated by \
        match_filter. Each event will contain a number of comments dealing \
        with correlation values and channels used for the detection. Each \
        channel used for the detection will have a corresponding :class: Pick \
        which will contain time and waveform information. HOWEVER, the user \
        should note that, at present, the pick times do not account for the \
        prepick times inherent in each template. For example, if a template \
        trace starts 0.1 seconds before the actual arrival of that phase, \
        then the pick time generated by match_filter for that phase will be \
        0.1 seconds early. We are looking towards a solution which will \
        involve saving templates alongside associated metadata.
    """
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    plt.ioff()
    import copy
    from eqcorrscan.utils import plotting
    from eqcorrscan.utils import findpeaks
    from obspy import Trace, Catalog, UTCDateTime, Stream
    from obspy.core.event import Event, Pick, CreationInfo, ResourceIdentifier
    from obspy.core.event import Comment, WaveformStreamID
    import time

    if arg_check:
        # Check the arguments to be nice - if arguments wrong type the parallel
        # output for the error won't be useful
        if not type(template_names) == list:
            raise IOError('template_names must be of type: list')
        if not type(template_list) == list:
            raise IOError('templates must be of type: list')
        for template in template_list:
            if not type(template) == Stream:
                msg = 'template in template_list must be of type: ' +\
                      'obspy.core.stream.Stream'
                raise IOError(msg)
        if not type(st) == Stream:
            msg = 'st must be of type: obspy.core.stream.Stream'
            raise IOError(msg)
        if threshold_type not in ['MAD', 'absolute', 'av_chan_corr']:
            msg = 'threshold_type must be one of: MAD, absolute, av_chan_corr'
            raise IOError(msg)

    # Copy the stream here because we will muck about with it
    stream = st.copy()
    templates = copy.deepcopy(template_list)
    # Debug option to confirm that the channel names match those in the
    # templates
    if debug >= 2:
        template_stachan = []
        data_stachan = []
        for template in templates:
            for tr in template:
                template_stachan.append(tr.stats.station + '.' +
                                        tr.stats.channel)
        for tr in stream:
            data_stachan.append(tr.stats.station + '.' + tr.stats.channel)
        template_stachan = list(set(template_stachan))
        data_stachan = list(set(data_stachan))
        if debug >= 3:
            print('I have template info for these stations:')
            print(template_stachan)
            print('I have daylong data for these stations:')
            print(data_stachan)
    # Perform a check that the daylong vectors are daylong
    for tr in stream:
        if not tr.stats.sampling_rate * 86400 == tr.stats.npts:
            msg = ' '.join(['Data are not daylong for', tr.stats.station,
                            tr.stats.channel])
            raise ValueError(msg)
    # Perform check that all template lengths are internally consistent
    for i, temp in enumerate(template_list):
        if len(set([tr.stats.npts for tr in temp])) > 1:
            msg = 'Template %s contains traces of differing length!! THIS \
                  WILL CAUSE ISSUES' % template_names[i]
            raise ValueError(msg)
    # Call the _template_loop function to do all the correlation work
    outtic = time.perf_counter()
    # Edit here from previous, stable, but slow match_filter
    # Would be worth testing without an if statement, but with every station in
    # the possible template stations having data, but for those without real
    # data make the data NaN to return NaN ccc_sum
    # Note: this works
    if debug >= 2:
        print('Ensuring all template channels have matches in daylong data')
    template_stachan = []
    for template in templates:
        for tr in template:
            template_stachan += [(tr.stats.station, tr.stats.channel)]
    template_stachan = list(set(template_stachan))
    # Copy this here to keep it safe
    for stachan in template_stachan:
        if not stream.select(station=stachan[0], channel=stachan[1]):
            # Remove template traces rather than adding NaN data
            for template in templates:
                if template.select(station=stachan[0], channel=stachan[1]):
                    for tr in template.select(station=stachan[0],
                                              channel=stachan[1]):
                        template.remove(tr)
    # Remove un-needed channels
    for tr in stream:
        if not (tr.stats.station, tr.stats.channel) in template_stachan:
            stream.remove(tr)
    # Also pad out templates to have all channels
    for template, template_name in zip(templates, template_names):
        if len(template) == 0:
            msg = ('No channels matching in continuous data for ' +
                   'template' + template_name)
            warnings.warn(msg)
            templates.remove(template)
            template_names.remove(template_name)
            continue
        for stachan in template_stachan:
            if not template.select(station=stachan[0], channel=stachan[1]):
                nulltrace = Trace()
                nulltrace.stats.station = stachan[0]
                nulltrace.stats.channel = stachan[1]
                nulltrace.stats.sampling_rate = template[0].stats.sampling_rate
                nulltrace.stats.starttime = template[0].stats.starttime
                nulltrace.data = np.array([np.nan] * len(template[0].data),
                                          dtype=np.float32)
                template += nulltrace
    if debug >= 2:
        print('Starting the correlation run for this day')
    [cccsums, no_chans, chans] = _channel_loop(templates, stream, cores, debug)
    if len(cccsums[0]) == 0:
        raise ValueError('Correlation has not run, zero length cccsum')
    outtoc = time.perf_counter()
    print(' '.join(['Looping over templates and streams took:',
                    str(outtoc - outtic), 's']))
    if debug >= 2:
        print(' '.join(['The shape of the returned cccsums is:',
                        str(np.shape(cccsums))]))
        print(' '.join(['This is from', str(len(templates)), 'templates']))
        print(' '.join(['Correlated with', str(len(stream)),
                        'channels of data']))
    detections = []
    if output_cat:
        det_cat = Catalog()
    for i, cccsum in enumerate(cccsums):
        template = templates[i]
        if threshold_type == 'MAD':
            rawthresh = threshold * np.median(np.abs(cccsum))
        elif threshold_type == 'absolute':
            rawthresh = threshold
        elif threshold_type == 'av_chan_corr':
            rawthresh = threshold * no_chans[i]
        # Findpeaks returns a list of tuples in the form [(cccsum, sample)]
        print(' '.join(['Threshold is set at:', str(rawthresh)]))
        print(' '.join(['Max of data is:', str(max(cccsum))]))
        print(' '.join(['Mean of data is:', str(np.mean(cccsum))]))
        if np.abs(np.mean(cccsum)) > 0.05:
            warnings.warn('Mean is not zero!  Check this!')
        # Set up a trace object for the cccsum as this is easier to plot and
        # maintains timing
        if plotvar:
            stream_plot = copy.deepcopy(stream[0])
            # Downsample for plotting
            stream_plot.decimate(int(stream[0].stats.sampling_rate / 10))
            cccsum_plot = Trace(cccsum)
            cccsum_plot.stats.sampling_rate = stream[0].stats.sampling_rate
            # Resample here to maintain shape better
            cccsum_hist = cccsum_plot.copy()
            cccsum_hist = cccsum_hist.decimate(int(stream[0].stats.
                                                   sampling_rate / 10)).data
            cccsum_plot = plotting.chunk_data(cccsum_plot, 10,
                                              'Maxabs').data
            # Enforce same length
            stream_plot.data = stream_plot.data[0:len(cccsum_plot)]
            cccsum_plot = cccsum_plot[0:len(stream_plot.data)]
            cccsum_hist = cccsum_hist[0:len(stream_plot.data)]
            plotting.triple_plot(cccsum_plot, cccsum_hist,
                                 stream_plot, rawthresh, True,
                                 plotdir + '/cccsum_plot_' +
                                 template_names[i] + '_' +
                                 stream[0].stats.starttime.
                                 datetime.strftime('%Y-%m-%d') +
                                 '.' + plot_format)
            if debug >= 4:
                print(' '.join(['Saved the cccsum to:', template_names[i],
                                stream[0].stats.starttime.datetime.
                                strftime('%Y%j')]))
                np.save(template_names[i] +
                        stream[0].stats.starttime.datetime.strftime('%Y%j'),
                        cccsum)
        tic = time.perf_counter()
        if debug >= 4:
            np.save('cccsum_' + str(i) + '.npy', cccsum)
        if debug >= 3 and max(cccsum) > rawthresh:
            peaks = findpeaks.find_peaks2_short(cccsum, rawthresh,
                                                trig_int * stream[0].stats.
                                                sampling_rate, debug,
                                                stream[0].stats.starttime,
                                                stream[0].stats.sampling_rate)
        elif max(cccsum) > rawthresh:
            peaks = findpeaks.find_peaks2_short(cccsum, rawthresh,
                                                trig_int * stream[0].stats.
                                                sampling_rate, debug)
        else:
            print('No peaks found above threshold')
            peaks = False
        toc = time.perf_counter()
        if debug >= 1:
            print(' '.join(['Finding peaks took:', str(toc - tic), 's']))
        if peaks:
            for peak in peaks:
                detecttime = stream[0].stats.starttime +\
                    peak[1] / stream[0].stats.sampling_rate
                # Detect time must be valid QuakeML uri within resource_id.
                # This will write a formatted string which is still readable by UTCDateTime
                rid = ResourceIdentifier(id=template_names[i] + '_' +
                                         str(detecttime.strftime('%Y%m%dT%H%M%S.%f')),
                                         prefix='smi:local')
                ev = Event(resource_id=rid)
                cr_i = CreationInfo(author='EQcorrscan',
                                    creation_time=UTCDateTime())
                ev.creation_info = cr_i
                # All detection info in Comments for lack of a better idea
                thresh_str = 'threshold=' + str(rawthresh)
                ccc_str = 'detect_val=' + str(peak[0])
                used_chans = 'channels used: ' +\
                             ' '.join([str(pair) for pair in chans[i]])
                ev.comments.append(Comment(text=thresh_str))
                ev.comments.append(Comment(text=ccc_str))
                ev.comments.append(Comment(text=used_chans))
                min_template_tm = min([tr.stats.starttime for tr in template])
                for tr in template:
                    if (tr.stats.station, tr.stats.channel) not in chans[i]:
                        continue
                    else:
                        pick_tm = detecttime + (tr.stats.starttime - min_template_tm)
                        wv_id = WaveformStreamID(network_code=tr.stats.network,
                                                 station_code=tr.stats.station,
                                                 channel_code=tr.stats.channel)
                        ev.picks.append(Pick(time=pick_tm, waveform_id=wv_id))
                detections.append(DETECTION(template_names[i],
                                            detecttime,
                                            no_chans[i], peak[0], rawthresh,
                                            'corr', chans[i], event=ev))
                if output_cat:
                    det_cat.append(ev)
        if extract_detections:
            detection_streams = extract_from_stream(stream, detections)
    del stream, templates
    if output_cat and not extract_detections:
        return detections, det_cat
    elif not extract_detections:
        return detections
    elif extract_detections and not output_cat:
        return detections, detection_streams
    else:
        return detections, det_cat, detection_streams
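A note on the resource_id construction in the detection loop above: it assumes the strftime-formatted detection time stays parseable by UTCDateTime. A minimal sketch of that round trip (the template name and timestamp below are made up for illustration):

from obspy import UTCDateTime
from obspy.core.event import ResourceIdentifier

detecttime = UTCDateTime(2018, 4, 2, 10, 30, 15, 123456)
# compact ISO-8601-like stamp, as written into the resource_id above
stamp = detecttime.strftime('%Y%m%dT%H%M%S.%f')
rid = ResourceIdentifier(id='template_a_' + stamp, prefix='smi:local')
print(rid)
print(UTCDateTime(stamp))   # parses back to 2018-04-02T10:30:15.123456Z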
Beispiel #41
0
def g1g2_corr(wf1,wf2,corr_file,src,source_conf,insta):
    """
    Compute noise cross-correlations from two .h5 'wavefield' files.
    Noise source distribution and spectrum is given by starting_model.h5
    It is assumed that noise sources are delta-correlated in space.
    """
    
    
    #ToDo: check whether to include autocorrs from user (now hardcoded off)
    #ToDo: Parallel loop(s)
    #ToDo tests
    

    # Metainformation: Include the reference station names for both stations
    # from wavefield files, if possible. Do not include geographic information
    # from .csv file as this might be error-prone. Just add the geographic 
    # info later if needed.

    with NoiseSource(src) as nsrc:

        ntime, n, n_corr, Fs = get_ns(wf1,source_conf,insta)

        # use a one-sided taper: the seismogram probably has a non-zero end,
        # being cut off wherever the solver stopped running.
        taper = cosine_taper(ntime,p=0.01)
        taper[0:ntime//2] = 1.0
        ntraces = nsrc.src_loc[0].shape[0]
        print(taper.shape)
        correlation = np.zeros(n_corr)

        if insta:
            # open database
            dbpath = json.load(open(os.path.join(source_conf['project_path'],
                'config.json')))['wavefield_path']
            # open and determine Fs, nt
            db = instaseis.open_db(dbpath)
            # get receiver locations
            lat1 = geograph_to_geocent(float(wf1[2]))
            lon1 = float(wf1[3])
            rec1 = instaseis.Receiver(latitude=lat1,longitude=lon1)
            lat2 = geograph_to_geocent(float(wf2[2]))
            lon2 = float(wf2[3])
            rec2 = instaseis.Receiver(latitude=lat2,longitude=lon2)

        else:
            wf1 = WaveField(wf1)
            wf2 = WaveField(wf2)

            
        # Loop over source locations
        for i in range(ntraces):

            # noise source spectrum at this location
            S = nsrc.get_spect(i)
            

            # If amplitude is 0, continue. (Spectrum has 0 phase anyway.)
            if S.sum() == 0.:
                continue

           
            if insta:
                # get source locations
                lat_src = geograph_to_geocent(nsrc.src_loc[1,i])
                lon_src = nsrc.src_loc[0,i]
                fsrc = instaseis.ForceSource(latitude=lat_src,
                    longitude=lon_src,f_r=1.e12)
                
                s1 = np.ascontiguousarray(db.get_seismograms(source=fsrc,
                    receiver=rec1,
                    dt=1./source_conf['sampling_rate'])[0].data*taper)
                s2 = np.ascontiguousarray(db.get_seismograms(source=fsrc,
                    receiver=rec2,
                    dt=1./source_conf['sampling_rate'])[0].data*taper)
                

            else:
                # read Green's functions
                s1 = np.ascontiguousarray(wf1.data[i,:]*taper)
                s2 = np.ascontiguousarray(wf2.data[i,:]*taper)
            
            
            # Fourier transform for greater ease of convolution
            spec1 = np.fft.rfft(s1,n)
            spec2 = np.fft.rfft(s2,n)
            
            # convolve G1G2
            g1g2_tr = np.multiply(np.conjugate(spec1),spec2)
            
            # convolve noise source
            c = np.multiply(g1g2_tr,S)
            
            # transform back    
            correlation += my_centered(np.fft.ifftshift(np.fft.irfft(c,n)),
                n_corr) * nsrc.surf_area[i]
            
            # occasional info
            if i%50000 == 0:
                print("Finished {} source locations.".format(i))
###################### end of loop over all source locations ###################

        if not insta:
            wf1.file.close()
            wf2.file.close()

        # save output
        trace = Trace()
        trace.stats.sampling_rate = Fs
        trace.data = correlation
        # try to add some meta data
        try:
            sta1 = wf1.stats['reference_station']
            sta2 = wf2.stats['reference_station']
            trace.stats.station = sta1.split('.')[1]
            trace.stats.network = sta1.split('.')[0]
            trace.stats.location = sta1.split('.')[2]
            trace.stats.channel = sta1.split('.')[3]
            trace.stats.sac = {}
            trace.stats.sac['kuser0'] = sta2.split('.')[1]
            trace.stats.sac['kuser1'] = sta2.split('.')[0]
            trace.stats.sac['kuser2'] = sta2.split('.')[2]
            trace.stats.sac['kevnm'] = sta2.split('.')[3]
        except:
            pass

        trace.write(filename=corr_file,format='SAC')

        #for nt in range(ntimesteps):
        #    values[nt] = np.fromfile(f_in,dtype=dtype_output,count=1)
        values = np.fromfile(f_in, dtype=dtype_output, count=ntimesteps)

        tr = Trace(data=values)

        # Filter and downsample
        # Since the same filter will be applied to all synthetics consistently, non-zero-phase should be okay
        # ToDo: Think about whether zerophase would be better

        # taper first
        #ToDo: Discuss with Andreas whether this tapering makes sense!
        tr.taper(type='cosine', max_percentage=0.001)
        tr.data = sosfilt(sos, tr.data)
        tr.stats.sampling_rate = fs_old
        tr.interpolate(fs_new)

        # Differentiate
        if output_quantity == 'VEL' or output_quantity == 'ACC':
            tr.differentiate()
            if output_quantity == 'ACC':
                tr.differentiate()

        # Remove the extra time that specfem added
        tr.trim(starttime=tr.stats.starttime + offset_seconds)

        # Set data type
        tr.data = tr.data.astype(dtype_output)
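The core of g1g2_corr earlier in this example is a frequency-domain correlation: conjugate-multiply the two Green's function spectra, weight by the source spectrum S, and transform back. A self-contained NumPy sketch of that step; my_centered is not shown in the example, and the trimming below is a plausible stand-in that keeps the central n_corr samples:

import numpy as np

def crosscorr_freq(s1, s2, S, n, n_corr):
    spec1 = np.fft.rfft(s1, n)
    spec2 = np.fft.rfft(s2, n)
    g1g2 = np.conjugate(spec1) * spec2                # correlation of the two Green's functions
    c = np.fft.ifftshift(np.fft.irfft(g1g2 * S, n))   # weight by source spectrum, back to time
    i0 = (len(c) - n_corr) // 2                       # keep the central n_corr samples
    return c[i0:i0 + n_corr]

# toy usage with a flat (white) source spectrum
rng = np.random.default_rng(0)
s1, s2 = rng.standard_normal(256), rng.standard_normal(256)
n = 512
corr = crosscorr_freq(s1, s2, np.ones(n // 2 + 1), n, n_corr=101)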
Beispiel #43
0
def _read_su(filename, headonly=False, byteorder=None,
             unpack_trace_headers=False, **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Unix (SU) file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SU file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the header and omit the waveform
        data.
    :type byteorder: str or ``None``
    :param byteorder: Determines the endianness of the file. Either ``'>'`` for
        big endian or ``'<'`` for little endian. If it is ``None``, it will try
        to autodetect the endianness. The endianness is always valid for the
        whole file. Defaults to ``None``.
    :type unpack_trace_headers: bool, optional
    :param unpack_trace_headers: Determines whether or not all trace header
        values will be unpacked during reading. If ``False`` it will greatly
        enhance performance and especially memory usage with large files. The
        header values can still be accessed and will be calculated on the fly
        but tab completion will no longer work. Look in the headers.py for a
        list of all possible trace header values. Defaults to ``False``.
    :returns: A ObsPy :class:`~obspy.core.stream.Stream` object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/1.su_first_trace")
    >>> st #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  #doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    ... | 2005-12-19T15:07:54.000000Z - ... | 4000.0 Hz, 8000 samples
    """
    # Read file to the internal segy representation.
    su_object = _read_suFile(filename, endian=byteorder,
                             unpack_headers=unpack_trace_headers)

    # Create the stream object.
    stream = Stream()

    # Get the endianness from the first trace.
    endian = su_object.traces[0].endian
    # Loop over all traces.
    for tr in su_object.traces:
        # Create new Trace object for every segy trace and append to the Stream
        # object.
        trace = Trace()
        stream.append(trace)
        # skip data if headonly is set
        if headonly:
            trace.stats.npts = tr.npts
        else:
            trace.data = tr.data
        trace.stats.su = AttribDict()
        # If all values will be unpacked create a normal dictionary.
        if unpack_trace_headers:
            # Add the trace header as a new attrib dictionary.
            header = AttribDict()
            for key, value in tr.header.__dict__.items():
                setattr(header, key, value)
        # Otherwise use the LazyTraceHeaderAttribDict.
        else:
            # Add the trace header as a new lazy attrib dictionary.
            header = LazyTraceHeaderAttribDict(tr.header.unpacked_header,
                                               tr.header.endian)
        trace.stats.su.trace_header = header
        # Also set the endianness.
        trace.stats.su.endian = endian
        # The sampling rate should be set for every trace. It is a sample
        # interval in microseconds. The only sanity check is that it should be
        # larger than 0.
        tr_header = trace.stats.su.trace_header
        if tr_header.sample_interval_in_ms_for_this_trace > 0:
            trace.stats.delta = \
                float(tr.header.sample_interval_in_ms_for_this_trace) / \
                1E6
        # If the year is not zero, calculate the start time. The end time is
        # then calculated from the start time and the sampling rate.
        # 99 is often used as a placeholder.
        if tr_header.year_data_recorded > 0:
            year = tr_header.year_data_recorded
            # The SEG Y rev 0 standard specifies the year to be a 4 digit
            # number.  Before that it was unclear if it should be a 2 or 4
            # digit number. Old or wrong software might still write 2 digit
            # years. Every number <30 will be mapped to 2000-2029 and every
            # number between 30 and 99 will be mapped to 1930-1999.
            if year < 100:
                if year < 30:
                    year += 2000
                else:
                    year += 1900
            julday = tr_header.day_of_year
            hour = tr_header.hour_of_day
            minute = tr_header.minute_of_hour
            second = tr_header.second_of_minute
            trace.stats.starttime = UTCDateTime(
                year=year, julday=julday, hour=hour, minute=minute,
                second=second)
    return stream
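Two header conventions used in _read_su above are easy to check in isolation: the two-digit-year mapping and the sample interval stored in microseconds. A small sketch with illustrative values:

def fix_two_digit_year(year):
    # SEG Y rev 0 wants a 4-digit year; old files may carry 2 digits:
    # 0-29 -> 2000-2029, 30-99 -> 1930-1999, as in the loop above
    if year < 100:
        year += 2000 if year < 30 else 1900
    return year

def delta_from_su_interval(interval_us):
    # the SU header field is a sample interval in microseconds
    return float(interval_us) / 1e6  # seconds per sample

print(fix_two_digit_year(5), fix_two_digit_year(87))  # 2005 1987
print(delta_from_su_interval(250))                    # 0.00025 s -> 4000 Hz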
Beispiel #44
0
 
#for nt in range(ntimesteps):
#    values[nt] = np.fromfile(f_in,dtype=dtype_output,count=1)
values = np.fromfile(f_in, dtype=dtype_output, count=ntimesteps)

tr = Trace(data=values)

# Filter and downsample
# Since the same filter will be applied to all synthetics consistently, non-zero-phase should be okay
# ToDo: Think about whether zerophase would be better

# taper first
# ToDo: Discuss with Andreas whether this tapering makes sense!
tr.taper(type='cosine', max_percentage=0.001)
tr.data = sosfilt(sos, tr.data)
tr.stats.sampling_rate = fs_old
tr.interpolate(fs_new)

# Differentiate
if output_quantity == 'VEL' or output_quantity == 'ACC':
    tr.differentiate()
    if output_quantity == 'ACC':
        tr.differentiate()

# Remove the extra time that specfem added
tr.trim(starttime=tr.stats.starttime + offset_seconds)

# Set data type
tr.data = tr.data.astype(dtype_output)
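The fragment above depends on names defined elsewhere in its source file (sos, fs_old, fs_new, offset_seconds, output_quantity, among others). A self-contained sketch of the same taper / filter / resample / differentiate / trim pipeline on a synthetic trace, with placeholder values for those names; the filter corner and sampling rates are illustrative only:

import numpy as np
from scipy.signal import butter, sosfilt
from obspy import Trace

fs_old, fs_new = 100.0, 10.0        # original and target sampling rates (assumed)
offset_seconds = 2.0                # extra lead time to trim off (assumed)
output_quantity = 'VEL'             # 'DIS', 'VEL' or 'ACC'
sos = butter(4, 4.0, btype='lowpass', fs=fs_old, output='sos')  # anti-alias filter

tr = Trace(data=np.random.randn(1000).astype(np.float32))
tr.stats.sampling_rate = fs_old

tr.taper(type='cosine', max_percentage=0.001)  # taper the ends first
tr.data = sosfilt(sos, tr.data)                # low-pass before resampling
tr.interpolate(fs_new)                         # downsample to fs_new
if output_quantity in ('VEL', 'ACC'):          # displacement -> velocity (-> acceleration)
    tr.differentiate()
    if output_quantity == 'ACC':
        tr.differentiate()
tr.trim(starttime=tr.stats.starttime + offset_seconds)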
Beispiel #45
0
def pro5stack2d(eq_num,
                slow_delta=0.0005,
                slowR_lo=-0.1,
                slowR_hi=0.1,
                slowT_lo=-0.1,
                slowT_hi=0.1,
                start_buff=-50,
                end_buff=50,
                norm=1,
                ARRAY=0,
                NS=False,
                decimate_fac=0,
                ref_loc=0,
                ref_lat=36.3,
                ref_lon=138.5,
                stack_option=1):

    from obspy import UTCDateTime
    from obspy import Stream, Trace
    from obspy import read
    from obspy.geodetics import gps2dist_azimuth
    import numpy as np
    import os
    from scipy.signal import hilbert
    import math
    import time

    import sys  # don't show any warnings
    import warnings
    from termcolor import colored
    print(colored('Running pro5b_stack2d', 'cyan'))

    env_stack = 0  # flag to stack envelopes instead of oscillating seismograms
    start_time_wc = time.time()

    fname = '/Users/vidale/Documents/Research/IC/EvLocs/event' + str(
        eq_num) + '.txt'
    file = open(fname, 'r')

    lines = file.readlines()
    split_line = lines[0].split()
    #            ids.append(split_line[0])  ignore label for now
    t = UTCDateTime(split_line[1])
    date_label = split_line[1][0:10]
    ev_lat = float(split_line[2])
    ev_lon = float(split_line[3])
    #    ev_depth    = float(      split_line[4])

    if not sys.warnoptions:
        warnings.simplefilter("ignore")

#%% Get location file
    if ARRAY == 0:  # Hinet set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_hinet.txt'
        if ref_loc == 0:
            ref_lat = 36.3
            ref_lon = 138.5
    elif ARRAY == 1:  # LASA set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_LASA.txt'
        if ref_loc == 0:
            ref_lat = 46.69
            ref_lon = -106.22
    elif ARRAY == 2:  # China set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_ch.txt'
        if ref_loc == 0:
            ref_lat = 38  # °N
            ref_lon = 104.5  # °E
    with open(sta_file, 'r') as file:
        lines = file.readlines()
    print(str(len(lines)) + ' stations read from ' + sta_file)
    # Load station coords into arrays
    station_index = range(len(lines))
    st_names = []
    st_lats = []
    st_lons = []
    for ii in station_index:
        line = lines[ii]
        split_line = line.split()
        st_names.append(split_line[0])
        st_lats.append(split_line[1])
        st_lons.append(split_line[2])
    if ARRAY == 0:  # shorten and make upper case Hi-net station names to match station list
        for ii in station_index:
            this_name = st_names[ii]
            this_name_truc = this_name[0:5]
            st_names[ii] = this_name_truc.upper()

#%% Input parameters
# date_label = '2018-04-02' # date for filename
    fname = 'HD' + date_label + 'sel.mseed'
    goto = '/Users/vidale/Documents/Research/IC/Pro_Files'
    os.chdir(goto)

    st = Stream()
    st = read(fname)
    print('Read in: ' + str(len(st)) + ' traces')
    nt = len(st[0].data)
    dt = st[0].stats.delta
    print('First trace has : ' + str(nt) + ' time pts, time sampling of ' +
          str(dt) + ' and thus duration of ' + str((nt - 1) * dt))

    #%% Make grid of slownesses
    slowR_n = int(
        round(1 + (slowR_hi - slowR_lo) / slow_delta))  # number of slownesses
    slowT_n = int(
        round(1 + (slowT_hi - slowT_lo) / slow_delta))  # number of slownesses
    stack_nt = int(
        round(1 + ((end_buff - start_buff) / dt)))  # number of time points

    # In English, stack_slows = range(slow_n) * slow_delta - slow_lo
    a1R = range(slowR_n)
    a1T = range(slowT_n)
    stack_Rslows = [(x * slow_delta + slowR_lo) for x in a1R]
    stack_Tslows = [(x * slow_delta + slowT_lo) for x in a1T]

    # testing slownesses in indexing
    print(
        str(slowR_n) + ' radial slownesses, ' + str(slowT_n) +
        ' trans slownesses, ')
    print('Radial     slownesses 0' + ' ' + str(stack_Rslows[0]) + '   '
          'end' + ' ' + str(stack_Rslows[-1]))
    print('Transverse slownesses 1' + ' ' + str(stack_Tslows[0]) + '   '
          'end' + ' ' + str(stack_Tslows[-1]))

    #%% Build empty Stack array
    stack = Stream()
    tr = Trace()
    tr.stats.delta = dt
    tr.stats.starttime = t + start_buff
    tr.stats.npts = stack_nt
    tr.stats.network = 'stack'
    tr.stats.channel = 'BHZ'
    tr.data = np.zeros(stack_nt)
    done = 0
    for stackR_one in stack_Rslows:
        for stackT_one in stack_Tslows:
            tr1 = tr.copy()
            tr1.stats.station = str(int(round(done)))
            stack.extend([tr1])
            done += 1

    #  Only need to compute ref location to event distance once
    ref_dist_az = gps2dist_azimuth(ev_lat, ev_lon, ref_lat, ref_lon)
    ref_back_az = ref_dist_az[2]

    #%% select by distance, window and adjust start time to align picked times
    done = 0
    if env_stack == 1:
        for tr in st:  #  #convert oscillating seismograms to envelopes
            tr.data = np.abs(hilbert(tr.data))

    for tr in st:  # traces one by one, find lat-lon by searching entire inventory.  Inefficient but cheap
        if tr.stats.station in st_names:  # find station in station list
            ii = st_names.index(tr.stats.station)
            if norm == 1:
                tr.normalize()  # trace divided abs(max of trace)
            stalat = float(st_lats[ii])
            stalon = float(
                st_lons[ii])  # use lat & lon to find distance and back-az
            rel_dist_az = gps2dist_azimuth(stalat, stalon, ref_lat, ref_lon)
            rel_dist = rel_dist_az[0] / 1000  # km
            rel_back_az = rel_dist_az[1]  # degrees

            if NS == False:
                del_distR = rel_dist * math.cos(
                    (rel_back_az - ref_back_az) * math.pi / 180)
                del_distT = rel_dist * math.sin(
                    (rel_back_az - ref_back_az) * math.pi / 180)
            else:  # North and east
                del_distR = rel_dist * math.cos(rel_back_az * math.pi / 180)
                del_distT = rel_dist * math.sin(rel_back_az * math.pi / 180)
            for slowR_i in range(
                    slowR_n):  # for this station, loop over radial slownesses
                for slowT_i in range(
                        slowT_n):  # loop over transverse slownesses
                    time_lag = del_distR * stack_Rslows[
                        slowR_i]  # time shift due to radial slowness
                    time_lag += del_distT * stack_Tslows[
                        slowT_i]  # time shift due to transverse slowness
                    time_correction = ((t - tr.stats.starttime) +
                                       (time_lag + start_buff)) / dt
                    indx = int(round(slowR_i * slowT_n + slowT_i))
                    # could do a little better by sampling finer, 20 sps?, before applying statics in pro3

                    if stack_option == 0:  # my old inefficient method
                        for it in range(
                                stack_nt):  # check points one at a time
                            it_in = int(round(it + time_correction))
                            if it_in >= 0 and it_in < nt - 1:  # does data lie within seismogram?
                                stack[indx].data[it] += tr[it_in]

                    if stack_option == 1:  #  Wei's much faster method
                        arr = tr.data
                        nshift = round(time_correction)
                        if time_correction < 0:
                            nshift = nshift - 1
                        if nshift <= 0:
                            nbeg1 = -nshift
                            nend1 = stack_nt
                            nbeg2 = 0
                            nend2 = stack_nt + nshift
                        elif nshift > 0:
                            nbeg1 = 0
                            nend1 = stack_nt - nshift
                            nbeg2 = nshift
                            nend2 = stack_nt
                        if nend1 >= 0 and nbeg1 <= stack_nt:
                            stack[indx].data[nbeg1:nend1] += arr[nbeg2:nend2]
            done += 1
            if done % 100 == 0:
                print('Done stacking ' + str(done) + ' out of ' +
                      str(len(st)) + ' stations.')
        else:
            print(tr.stats.station + ' not found in station list')

#%% take envelope, decimate envelope
    stack_raw = stack.copy()
    for slowR_i in range(slowR_n):  # loop over radial slownesses
        for slowT_i in range(slowT_n):  # loop over transverse slownesses
            indx = slowR_i * slowT_n + slowT_i
            stack[indx].data = np.abs(hilbert(stack[indx].data))
            if decimate_fac != 0:
                stack[indx].decimate(decimate_fac, no_filter=True)

#%%  Save processed files
    fname = 'HD' + date_label + '_2dstack_env.mseed'
    stack.write(fname, format='MSEED')

    fname = 'HD' + date_label + '_2dstack.mseed'
    stack_raw.write(fname, format='MSEED')

    elapsed_time_wc = time.time() - start_time_wc
    print(f'This job took   {elapsed_time_wc:.1f}   seconds')
    os.system('say "Done"')
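The stack_option == 1 branch above adds each shifted trace into the stack with a single slice operation instead of a per-sample loop. A standalone re-statement of that index bookkeeping on plain NumPy arrays, using the same rounding convention as the loop above:

import numpy as np

def shift_and_add(stack, arr, time_correction):
    # add arr into stack, shifted earlier by roughly time_correction samples, via slices
    stack_nt = len(stack)
    nshift = int(round(time_correction))
    if time_correction < 0:
        nshift -= 1
    if nshift <= 0:
        nbeg1, nend1 = -nshift, stack_nt
        nbeg2, nend2 = 0, stack_nt + nshift
    else:
        nbeg1, nend1 = 0, stack_nt - nshift
        nbeg2, nend2 = nshift, stack_nt
    if nend1 >= 0 and nbeg1 <= stack_nt:
        stack[nbeg1:nend1] += arr[nbeg2:nend2]
    return stack

# a spike at sample 10, shifted by +3 samples, ends up at sample 7
arr = np.zeros(20)
arr[10] = 1.0
print(np.argmax(shift_and_add(np.zeros(20), arr, 3.0)))  # 7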
def match_filter(
    template_names, templates, stream, threshold, threshold_type, trig_int, plotvar, cores=1, tempdir=False, debug=0
):
    """
    Over-arching code to run the correlations of given templates with a day of
    seismic data and output the detections based on a given threshold.

    :type templates: list :class: 'obspy.Stream'
    :param templates: A list of templates of which each template is a Stream of\
        obspy traces containing seismic data and header information.
    :type stream: :class: 'obspy.Stream'
    :param stream: An obspy.Stream object containing all the data available and\
        required for the correlations with templates given.  For efficiency this\
        should contain no excess traces which are not in one or more of the\
        templates.
    :type threshold: float
    :param threshold: A threshold value set based on the threshold_type
    :type threshold_type: str
    :param threshold_type: The type of threshold to be used, can be MAD,\
        absolute or av_chan_corr.    MAD threshold is calculated as the\
        threshold*(median(abs(cccsum))) where cccsum is the cross-correlation\
        sum for a given template. absolute threshold is a true absolute\
        threshold based on the cccsum value av_chan_corr is based on the mean\
        values of single-channel cross-correlations assuming all data are\
        present as required for the template, \
        e.g. av_chan_corr_thresh=threshold*(cccsum/len(template)) where\
        template is a single template from the input and the length is the\
        number of channels within this template.
    :type trig_int: float
    :param trig_int: Minimum gap between detections in seconds.
    :type tempdir: String or False
    :param tempdir: Directory to put temporary files, or False
    :type cores: int
    :param cores: Number of cores to use
    :type debug: int
    :param debug: Debug output level, the bigger the number, the more the output

    :return: :class: 'DETECTIONS' detections for each channel formatted as\
    :class: 'obspy.UTCDateTime' objects.

    """
    from eqcorrscan.utils import findpeaks, EQcorrscan_plotting
    import time, copy
    from obspy import Trace

    match_internal = False  # Set to True if memory is an issue; if True, will only
    # use about the same amount of memory as the seismic data take up.
    # If False, it will use 20-100GB per instance.
    # Debug option to confirm that the channel names match those in the templates
    if debug >= 2:
        template_stachan = []
        data_stachan = []
        for template in templates:
            for tr in template:
                template_stachan.append(tr.stats.station + "." + tr.stats.channel)
        for tr in stream:
            data_stachan.append(tr.stats.station + "." + tr.stats.channel)
        template_stachan = list(set(template_stachan))
        data_stachan = list(set(data_stachan))
        if debug >= 3:
            print "I have template info for these stations:"
            print template_stachan
            print "I have daylong data for these stations:"
            print data_stachan
    # Perform a check that the daylong vectors are daylong
    for tr in stream:
        if not tr.stats.sampling_rate * 86400 == tr.stats.npts:
            raise ValueError("Data are not daylong for " + tr.stats.station + "." + tr.stats.channel)
    # Call the _template_loop function to do all the correlation work
    outtic = time.clock()
    # Edit here from previous, stable, but slow match_filter
    # Would be worth testing without an if statement, but with every station in
    # the possible template stations having data, but for those without real
    # data make the data NaN to return NaN ccc_sum
    if debug >= 2:
        print "Ensuring all template channels have matches in daylong data"
    template_stachan = []
    for template in templates:
        for tr in template:
            template_stachan += [(tr.stats.station, tr.stats.channel)]
    template_stachan = list(set(template_stachan))
    # Copy this here to keep it safe
    for stachan in template_stachan:
        if not stream.select(station=stachan[0], channel=stachan[1]):
            # Add a trace of NaN's
            nulltrace = Trace()
            nulltrace.stats.station = stachan[0]
            nulltrace.stats.channel = stachan[1]
            nulltrace.stats.sampling_rate = stream[0].stats.sampling_rate
            nulltrace.stats.starttime = stream[0].stats.starttime
            nulltrace.data = np.array([np.NaN] * len(stream[0].data), dtype=np.float32)
            stream += nulltrace
    # Also pad out templates to have all channels
    for template in templates:
        for stachan in template_stachan:
            if not template.select(station=stachan[0], channel=stachan[1]):
                nulltrace = Trace()
                nulltrace.stats.station = stachan[0]
                nulltrace.stats.channel = stachan[1]
                nulltrace.stats.sampling_rate = template[0].stats.sampling_rate
                nulltrace.stats.starttime = template[0].stats.starttime
                nulltrace.data = np.array([np.NaN] * len(template[0].data), dtype=np.float32)
                template += nulltrace

    if debug >= 2:
        print "Starting the correlation run for this day"
    if match_internal:
        [cccsums, no_chans] = run_channel_loop(templates, stream, tempdir)
    else:
        [cccsums, no_chans] = _channel_loop(templates, stream, cores, debug)
    if len(cccsums[0]) == 0:
        raise ValueError("Correlation has not run, zero length cccsum")
    outtoc = time.clock()
    print "Looping over templates and streams took: " + str(outtoc - outtic) + " s"
    if debug >= 2:
        print "The shape of the returned cccsums is: " + str(np.shape(cccsums))
        print "This is from " + str(len(templates)) + " templates"
        print "Correlated with " + str(len(stream)) + " channels of data"
    i = 0
    detections = []
    for cccsum in cccsums:
        template = templates[i]
        if threshold_type == "MAD":
            rawthresh = threshold * np.median(np.abs(cccsum))
        elif threshold_type == "absolute":
            rawthresh = threshold
        elif threshold_type == "av_chan_corr":
            rawthresh = threshold * (cccsum / len(template))
        else:
            print("You have not selected the correct threshold type, I will use MAD as I like it")
            rawthresh = threshold * np.median(np.abs(cccsum))
        # Findpeaks returns a list of tuples in the form [(cccsum, sample)]
        print("Threshold is set at: " + str(rawthresh))
        print("Max of data is: " + str(max(cccsum)))
        print("Mean of data is: " + str(np.mean(cccsum)))
        if np.abs(np.mean(cccsum)) > 0.05:
            warnings.warn("Mean is not zero!  Check this!")
        # Set up a trace object for the cccsum as this is easier to plot and
        # maintains timing
        if plotvar:
            stream_plot = copy.deepcopy(stream[0])
            # Downsample for plotting
            stream_plot.decimate(int(stream[0].stats.sampling_rate / 20))
            cccsum_plot = Trace(cccsum)
            cccsum_plot.stats.sampling_rate = stream[0].stats.sampling_rate
            # Resample here to maintain shape better
            cccsum_hist = cccsum_plot.copy()
            cccsum_hist = cccsum_hist.decimate(int(stream[0].stats.sampling_rate / 20)).data
            cccsum_plot = EQcorrscan_plotting.chunk_data(cccsum_plot, 20, "Maxabs").data
            # Enforce same length
            stream_plot.data = stream_plot.data[0 : len(cccsum_plot)]
            cccsum_plot = cccsum_plot[0 : len(stream_plot.data)]
            cccsum_hist = cccsum_hist[0 : len(stream_plot.data)]
            EQcorrscan_plotting.triple_plot(
                cccsum_plot,
                cccsum_hist,
                stream_plot,
                rawthresh,
                True,
                "plot/cccsum_plot_"
                + template_names[i]
                + "_"
                + str(stream[0].stats.starttime.year)
                + "-"
                + str(stream[0].stats.starttime.month)
                + "-"
                + str(stream[0].stats.starttime.day)
                + ".jpg",
            )
            np.save(template_names[i] + stream[0].stats.starttime.datetime.strftime("%Y%j"), cccsum)
        tic = time.clock()
        if debug >= 4:
            np.save("cccsum_" + str(i) + ".npy", cccsum)
        if debug >= 3 and max(cccsum) > rawthresh:
            peaks = findpeaks.find_peaks2_short(
                cccsum,
                rawthresh,
                trig_int * stream[0].stats.sampling_rate,
                debug,
                stream[0].stats.starttime,
                stream[0].stats.sampling_rate,
            )
        elif max(cccsum) > rawthresh:
            peaks = findpeaks.find_peaks2_short(cccsum, rawthresh, trig_int * stream[0].stats.sampling_rate, debug)
        else:
            print "No peaks found above threshold"
            peaks = False
        toc = time.clock()
        if debug >= 1:
            print "Finding peaks took: " + str(toc - tic) + " s"
        if peaks:
            for peak in peaks:
                detecttime = stream[0].stats.starttime + peak[1] / stream[0].stats.sampling_rate
                detections.append(DETECTION(template_names[i], detecttime, no_chans[i], peak[0], rawthresh, "corr"))
        i += 1

    return detections
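The three threshold_type options described in the docstring above reduce to a small amount of arithmetic on the cross-correlation sum. A hedged sketch: the av_chan_corr line reflects my reading of the docstring (an average single-channel correlation of threshold implies a cccsum of threshold times the number of channels), whereas the function above instead scales cccsum itself:

import numpy as np

def raw_threshold(cccsum, threshold, threshold_type, n_chans):
    if threshold_type == 'MAD':
        # multiple of the median absolute cross-correlation sum
        return threshold * np.median(np.abs(cccsum))
    elif threshold_type == 'absolute':
        return threshold
    elif threshold_type == 'av_chan_corr':
        return threshold * n_chans
    raise ValueError('unknown threshold_type: %s' % threshold_type)

cccsum = np.random.randn(86400 * 20) * 0.02   # fake day-long correlation sum at 20 Hz
print(raw_threshold(cccsum, 8.0, 'MAD', n_chans=6))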
Beispiel #47
0
def stochastic_simulation(home,project_name,rupture_name,sta,sta_lon,sta_lat,component,model_name,
        rise_time_depths,moho_depth_in_km,total_duration=100,hf_dt=0.01,stress_parameter=50,
        kappa=0.04,Qexp=0.6,Pwave=False,high_stress_depth=1e4): 
    '''
    Run stochastic HF sims
    
    stress parameter is in bars
    '''
    
    from numpy import genfromtxt,pi,logspace,log10,mean,where,exp,arange,zeros,argmin,rad2deg,arctan2,real
    from pyproj import Geod
    from obspy.geodetics import kilometer2degrees
    from obspy.taup import TauPyModel
    from mudpy.forward import get_mu, write_fakequakes_hf_waveforms_one_by_one,read_fakequakes_hypo_time
    from obspy import Stream,Trace
    from sys import stdout
    import warnings


    #print out what's going on:
    out='''Running with input parameters:
    home = %s
    project_name = %s
    rupture_name = %s
    sta = %s
    sta_lon = %s
    sta_lat = %s
    model_name = %s
    rise_time_depths = %s
    moho_depth_in_km = %s
    total_duration = %s
    hf_dt = %s
    stress_parameter = %s
    kappa = %s
    Qexp = %s
    component = %s
    Pwave = %s
    high_stress_depth = %s
    '''%(home,project_name,rupture_name,sta,str(sta_lon),str(sta_lat),model_name,str(rise_time_depths),
    str(moho_depth_in_km),str(total_duration),str(hf_dt),str(stress_parameter),
    str(kappa),str(Qexp),str(component),str(Pwave),str(high_stress_depth))
    print(out)

#    rupture=rupture_name.split('.')[0]+'.'+rupture_name.split('.')[1]
#    log=home+project_name+'/output/waveforms/'+rupture+'/'+sta+'.HN'+component+'.1cpu.log'
#    logfile=open(log,'w')
#    logfile.write(out)
    #print 'stress is '+str(stress_parameter)

    #I don't condone it but this cleans up the warnings
    warnings.filterwarnings("ignore")
    
    #Load the source
    fault=genfromtxt(home+project_name+'/output/ruptures/'+rupture_name)    
    
    #Onset times for each subfault
    onset_times=fault[:,12]
    
    #load velocity structure
    structure=genfromtxt(home+project_name+'/structure/'+model_name)
    
    #Frequencies vector
    f=logspace(log10(hf_dt),log10(1/(2*hf_dt))+0.01,50)
    omega=2*pi*f
    
    #Output time vector (0 is origin time)
    t=arange(0,total_duration,hf_dt)
    
    #Projection object for distance calculations
    g=Geod(ellps='WGS84')
    
    #Create taup velocity model object, paste on top of iaspei91
    #taup_create.build_taup_model(home+project_name+'/structure/bbp_norcal.tvel',output_folder=home+project_name+'/structure/')
    velmod=TauPyModel(model=home+project_name+'/structure/maule',verbose=True)
    #Get epicentral time
    epicenter,time_epi=read_fakequakes_hypo_time(home,project_name,rupture_name)
    
    #Moments
    slip=(fault[:,8]**2+fault[:,9]**2)**0.5
    subfault_M0=slip*fault[:,10]*fault[:,11]*fault[:,13]
    subfault_M0=subfault_M0*1e7 #to dyne-cm
    M0=subfault_M0.sum()
    relative_subfault_M0=subfault_M0/M0
    Mw=(2./3)*(log10(M0*1e-7)-9.1)
    
    #Corner frequency scaling
    i=where(slip>0)[0] #Non-zero faults
    N=len(i) #number of subfaults
    dl=mean((fault[:,10]+fault[:,11])/2) #predominant length scale
    dl=dl/1000 # to km
    
    #Tau=p perturbation
    tau_perturb=0.1
    
    #Deep faults receive a higher stress
    stress_multiplier=3
         
    print('... working on '+component+' component semistochastic waveform for station '+sta)

    #initalize output seismogram
    tr=Trace()
    tr.stats.station=sta
    tr.stats.delta=hf_dt
    tr.stats.starttime=time_epi
    #info for sac header (added at the end)
    az,backaz,dist_m=g.inv(epicenter[0],epicenter[1],sta_lon,sta_lat)
    dist_in_km=dist_m/1000.    
    
    hf=zeros(len(t))
    
#    out='''Parameters before we get into subfault calculations:
#    rupture_name = %s
#    epicenter = %s
#    time_epi = %s
#    M0 = %E
#    Mw = %10.4f
#    Num_Subfaults = %i
#    dl = %.2f
#    Dist_in_km = %10.4f
#    '''%(rupture_name,str(epicenter),str(time_epi),M0,Mw,int(N),dl,dist_in_km)
#    print out
#    logfile.write(out)
    
    #Loop over subfaults
#    earliestP=1e10  #something outrageously high
#    earliestP_kfault=1e10
    for kfault in range(len(fault)):
        
        #Print status to screen            
        if kfault % 150 == 0:
            if kfault==0:
                stdout.write('      [')
                stdout.flush()
            stdout.write('.')
            stdout.flush()
        if kfault==len(fault)-1:
            stdout.write(']\n')
            stdout.flush()                
        
        #Include only subfaults with non-zero slip
        if subfault_M0[kfault]>0:
            
            #Get subfault to station distance
            lon_source=fault[kfault,1]
            lat_source=fault[kfault,2]
            azimuth,baz,dist=g.inv(lon_source,lat_source,sta_lon,sta_lat)
            dist_in_degs=kilometer2degrees(dist/1000.)
            
            #Source depth?
            z_source=fault[kfault,3]
            
            #No change
            stress=stress_parameter
            
            #Is subfault in an SMGA?
            #radius_in_km=15.0
            #smga_center_lon=-69.709200
            #smga_center_lat=-19.683600
            #in_smga=is_subfault_in_smga(lon_source,lat_source,smga_center_lon,smga_center_lat,radius_in_km)
            #
            ###Apply multiplier?
            #if in_smga==True:
            #    stress=stress_parameter*stress_multiplier
            #    print "%.4f,%.4f is in SMGA, stress is %d" % (lon_source,lat_source,stress)
            #else:
            #    stress=stress_parameter
            
            #Apply multiplier?
            #if slip[kfault]>7.5:
            #    stress=stress_parameter*stress_multiplier
            ##elif lon_source>-72.057 and lon_source<-71.2 and lat_source>-30.28:
            ##    stress=stress_parameter*stress_multiplier
            #else:
            #    stress=stress_parameter
                
            #Apply multiplier?
            #if z_source>high_stress_depth:
            #    stress=stress_parameter*stress_multiplier
            #else:
            #    stress=stress_parameter
            
            # Frankel 95 scaling of corner frequency #verified this looks the same in GP
            # Right now this applies the same factor to all faults
            fc_scale=(M0)/(N*stress*dl**3*1e21) #Frankel scaling
            small_event_M0 = stress*dl**3*1e21
            
        

            
            #Get rho, alpha, beta at subfault depth
            zs=fault[kfault,3]
            mu,alpha,beta=get_mu(structure,zs,return_speeds=True)
            rho=mu/beta**2
            
            #Get radiation scale factor
            Spartition=1/2**0.5
            if component=='N' :
                component_angle=0
            elif component=='E':
                component_angle=90
            
            rho=rho/1000 #to g/cm**3
            beta=(beta/1000)*1e5 #to cm/s
            alpha=(alpha/1000)*1e5
            
            #Verified this produces same value as in GP
            CS=(2*Spartition)/(4*pi*(rho)*(beta**3))
            CP=2/(4*pi*(rho)*(alpha**3))
            
            #Get local subfault rupture speed
            beta=beta/100 #to m/s
            vr=get_local_rupture_speed(zs,beta,rise_time_depths)
            vr=vr/1000 #to km/s
            dip_factor=get_dip_factor(fault[kfault,5],fault[kfault,8],fault[kfault,9])
            
            #Subfault corner frequency
            c0=2.0 #GP2015 value
            fc_subfault=(c0*vr)/(dip_factor*pi*dl)
            
            #get subfault source spectrum
            #S=((relative_subfault_M0[kfault]*M0/N)*f**2)/(1+fc_scale*(f/fc_subfault)**2)
            S=small_event_M0*(omega**2/(1+(f/fc_subfault)**2))
            frankel_conv_operator= fc_scale*((fc_subfault**2+f**2)/(fc_subfault**2+fc_scale*f**2))
            S=S*frankel_conv_operator
            
            #get high frequency decay
            P=exp(-pi*kappa*f)
            
            #get quarter wavelength amplification factors
            # pass rho in kg/m^3 (this units nightmare is what I get for following Graves' code)
            I=get_amplification_factors(f,structure,zs,beta,rho*1000)
            
#            if kfault==0:
#                out='''Parameters within subfault calculations:
#                kfault_lon = %10.4f
#                kfault_lat = %10.4f
#                CS = %s
#                CP = %s
#                S[0] = %s
#                frankel_conv_operator[0] = %s
#                '''%(fault[kfault,1],fault[kfault,2],str(CS),str(CP),str(S[0]),str(frankel_conv_operator[0]))
#                print out
#                logfile.write(out)
            
            #Get other geometric parameters necessary for radiation pattern
            strike=fault[kfault,4]
            dip=fault[kfault,5]
            ss=fault[kfault,8]
            ds=fault[kfault,9]
            rake=rad2deg(arctan2(ds,ss))
            
            #Get ray paths for all direct P arrivals
            Ppaths=velmod.get_ray_paths(zs,dist_in_degs,phase_list=['P','p'])
            
            #Get ray paths for all direct S arrivals
            try:
                Spaths=velmod.get_ray_paths(zs,dist_in_degs,phase_list=['S','s'])
            except:
                Spaths=velmod.get_ray_paths(zs+tau_perturb,dist_in_degs,phase_list=['S','s'])
                
            #sometimes there's no S, weird I know. Check twice.
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs+tau_perturb,dist_in_degs,phase_list=['S','s'])
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs+5*tau_perturb,dist_in_degs,phase_list=['S','s'])   
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs-5*tau_perturb,dist_in_degs,phase_list=['S','s'])
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs+5*tau_perturb,dist_in_degs,phase_list=['S','s'])  
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs-10*tau_perturb,dist_in_degs,phase_list=['S','s'])
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs+10*tau_perturb,dist_in_degs,phase_list=['S','s']) 
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs-50*tau_perturb,dist_in_degs,phase_list=['S','s'])
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs+50*tau_perturb,dist_in_degs,phase_list=['S','s']) 
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs-75*tau_perturb,dist_in_degs,phase_list=['S','s'])
            if len(Spaths)==0:
                Spaths=velmod.get_ray_paths(zs+75*tau_perturb,dist_in_degs,phase_list=['S','s']) 
            if len(Spaths)==0:
                print('ERROR: I give up, no direct S in spite of multiple attempts at subfault '+str(kfault))

            #Get direct s path and moho reflection
            mohoS=None
            directS=Spaths[0]
            directP=Ppaths[0]
            #print len(Spaths)
            if len(Spaths)==1: #only direct S
                pass
            else:
                #turn_depth=zeros(len(Spaths)-1) #turning depth of other non-direct rays
                #for k in range(1,len(Spaths)):
                #    turn_depth[k-1]=Spaths[k].path['depth'].max()
                ##If there's a ray that turns within 2km of Moho, call that guy the Moho reflection
                #deltaz=abs(turn_depth-moho_depth_in_km)
                #i=argmin(deltaz)
                #if deltaz[i]<2: #Yes, this is a moho reflection
                #    mohoS=Spaths[i+1]
                #else:
                #    mohoS=None
                mohoS=Spaths[-1]
                 
 
            #######         Build Direct P ray           ######
            if Pwave==True:
                take_off_angle_P=directP.takeoff_angle
                
                #Get attenuation due to geometrical spreading (from the path length)
                path_length_P=get_path_length(directP,zs,dist_in_degs)
                path_length_P=path_length_P*100 #to cm
                
                #Get effect of intrinsic attenuation for that ray (path integrated)
                Q_P=get_attenuation(f,structure,directS,Qexp,Qtype='P')
                
                #Build the entire path term
                G_P=(I*Q_P)/path_length_P

                #Get conically averaged radiation pattern terms
                RP=conically_avg_P_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_P)
                RP=abs(RP)
                   
                #Get partition of Pwave into Z and N,E components 
                incidence_angle=directP.incident_angle
                Npartition,Epartition,Zpartition=get_P_wave_partition(incidence_angle,azimuth)
                if component=='Z':
                   Ppartition=Zpartition 
                elif component=='N':
                    Ppartition=Npartition
                else:
                    Ppartition=Epartition
                    
                #And finally multiply everything together to get the subfault amplitude spectrum
                AP=CP*S*G_P*P*RP*Ppartition           

                #Generate windowed time series
                duration=1./fc_subfault+0.09*(dist/1000)
                w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
                
                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
                hf_seis_P=apply_spectrum(w,AP,f,hf_dt)
                
                #What time after OT should this time series start at?
                time_insert=directP.path['time'][-1]+onset_times[kfault]
#                if directP.time+onset_times[kfault] < earliestP:
#                    earliestP=directP.time+onset_times[kfault]
#                    earliestP_kfault=kfault
                i=argmin(abs(t-time_insert))
                j=i+len(hf_seis_P)
                
                #Check seismogram doesn't go past last sample
                if i<len(hf)-1: #if i (the beginning of the seismogram) is less than the length
                    if j>len(hf): #seismogram goes past total_duration length, trim it
                        len_paste=len(hf)-i
                        j=len(hf)
                        #Add seismogram
                        hf[i:j]=hf[i:j]+real(hf_seis_P[0:len_paste])
                    else: #Lengths are fine
                        hf[i:j]=hf[i:j]+real(hf_seis_P)      
                else: #Seismogram starts after end of available space
                    pass   
                
                                           
                                                                  
                                                                                                                
                          
            #######         Build Direct S ray           ######
            take_off_angle_S=directS.takeoff_angle
            
            #Get attenuation due to geometrical spreading (from the path length)
            path_length_S=get_path_length(directS,zs,dist_in_degs)
            path_length_S=path_length_S*100 #to cm
            
            #Get effect of intrinsic attenuation for that ray (path integrated)
            Q_S=get_attenuation(f,structure,directS,Qexp)
            
            #Build the entire path term
            G_S=(I*Q_S)/path_length_S

            #Get conically averaged radiation pattern terms
            if component=='Z':
                RP_vert=conically_avg_vert_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_S)
                #And finally multiply everything together to get the subfault amplitude spectrum
                AS=CS*S*G_S*P*RP_vert   
            else:
                RP=conically_avg_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_S,component_angle)
                RP=abs(RP)
                #And finally multiply everything together to get the subfault amplitude spectrum
                AS=CS*S*G_S*P*RP                

            #Generate windowed time series
            duration=1./fc_subfault+0.063*(dist/1000)
            w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
            #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])
            
            #Go to frequency domain, apply amplitude spectrum and ifft for final time series
            hf_seis_S=apply_spectrum(w,AS,f,hf_dt)
            
            #What time after OT should this time series start at?
            time_insert=directS.path['time'][-1]+onset_times[kfault]
            #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
            #time_insert=Ppaths[0].path['time'][-1]
            i=argmin(abs(t-time_insert))
            j=i+len(hf_seis_S)
            
            
            #Check seismogram doesn't go past last sample
            if i<len(hf)-1: #if i (the beginning of the seismogram) is less than the length
                if j>len(hf): #seismogram goes past total_duration length, trim it
                    len_paste=len(hf)-i
                    j=len(hf)
                    #Add seismogram
                    hf[i:j]=hf[i:j]+real(hf_seis_S[0:len_paste])
                else: #Lengths are fine
                    hf[i:j]=hf[i:j]+real(hf_seis_S)
            else: #Beginning of seismogram is past end of available space
                pass
            
            
            #######         Build Moho reflected S ray           ######
#            if mohoS==None:
#                pass
#            else:
#                if kfault%100==0:
#                    print '... ... building Moho reflected S wave'
#                take_off_angle_mS=mohoS.takeoff_angle
#                
#                #Get attenuation due to geometrical spreading (from the path length)
#                path_length_mS=get_path_length(mohoS,zs,dist_in_degs)
#                path_length_mS=path_length_mS*100 #to cm
#                
#                #Get effect of intrinsic attenuation for that ray (path integrated)
#                Q_mS=get_attenuation(f,structure,mohoS,Qexp)
#                
#                #Build the entire path term
#                G_mS=(I*Q_mS)/path_length_mS
#
#                #Get conically averaged radiation pattern terms
#                if component=='Z':
#                    RP_vert=conically_avg_vert_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS)
#                    #And finally multiply everything together to get the subfault amplitude spectrum
#                    A=C*S*G_mS*P*RP_vert   
#                else:
#                    RP=conically_avg_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS,component_angle)
#                    RP=abs(RP)
#                    #And finally multiply everything together to get the subfault amplitude spectrum
#                    A=C*S*G_mS*P*RP                
#
#                #Generate windowed time series
#                duration=1./fc_subfault+0.063*(dist/1000)
#                w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
#                #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])
#                
#                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
#                hf_seis=apply_spectrum(w,A,f,hf_dt)
#                
#                #What time after OT should this time series start at?
#                time_insert=mohoS.path['time'][-1]+onset_times[kfault]
#                #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
#                #time_insert=Ppaths[0].path['time'][-1]
#                i=argmin(abs(t-time_insert))
#                j=i+len(hf_seis)
#                
#                #Add seismogram
#                hf[i:j]=hf[i:j]+hf_seis
#                
#                #Done, reset
#                mohoS=None        
#        if kfault==0:
#            out=''' More:
#            fc_scale = %10.4f
#            subfaultM0 = %E
#            mu = %E
#            CS = %E
#            CP = %E
#            vr = %10.4f
#            dip_factor = %10.4f
#            fc_subfault = %10.4f
#            directS = %s
#            directP = %s
#            '''%(fc_scale,subfault_M0[kfault],mu,CS,CP,vr,dip_factor,fc_subfault,str(directS.time),str(directP.time))
#            print out
#            logfile.write(out)
#    logfile.close()
    #Done
    tr.data=hf/100 #convert to m/s**2
    #Add station location, event location, and first P-wave arrival time to SAC header
    tr.stats.update({'sac':{'stlo':sta_lon,'stla':sta_lat,'evlo':epicenter[0],'evla':epicenter[1],'evdp':epicenter[2],'dist':dist_in_km,'az':az,'baz':backaz,'mag':Mw}}) #,'idep':"ACC (m/s^2)" not sure why idep won't work
    #Return trace for writing to file    
#    print "Earliest P wave Comes at " + str(earliestP) + "after OT, from location " + str(fault[earliestP_kfault,1]) + ", " + str(fault[earliestP_kfault,2]) + ", " +str(fault[earliestP_kfault,3])
    return tr
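The per-subfault amplitude spectrum assembled above is the product of a scaled source spectrum, a path term, a kappa site-decay term and a radiation-pattern term. A sketch of the purely spectral pieces that appear explicitly in the code; the parameter values are illustrative and not taken from any real rupture model:

import numpy as np

f = np.logspace(-2, np.log10(50.0), 50)    # frequency vector, Hz
omega = 2 * np.pi * f
kappa = 0.04                               # site kappa, s
fc_subfault = 0.8                          # subfault corner frequency, Hz
small_event_M0 = 1e23                      # "small event" moment, dyne-cm
fc_scale = 40.0                            # Frankel (1995) scaling factor

S = small_event_M0 * (omega ** 2 / (1 + (f / fc_subfault) ** 2))
frankel = fc_scale * (fc_subfault ** 2 + f ** 2) / (fc_subfault ** 2 + fc_scale * f ** 2)
S = S * frankel                            # scaled subfault source spectrum
P = np.exp(-np.pi * kappa * f)             # kappa high-frequency decay
# The full subfault spectrum is then C * S * G * P * RP, with the material (C),
# path (G) and radiation-pattern (RP) terms coming from the helper routines
# used in the function above.
print(S[0], P[-1])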
Beispiel #48
0
def match_filter(template_names, template_list, st, threshold,
                 threshold_type, trig_int, plotvar, plotdir='.', cores=1,
                 tempdir=False, debug=0, plot_format='jpg'):
    r"""Over-arching code to run the correlations of given templates with a\
    day of seismic data and output the detections based on a given threshold.

    :type template_names: list
    :param template_names: List of template names in the same order as\
     template_list
    :type template_list: list :class: 'obspy.Stream'
    :param template_list: A list of templates of which each template is a\
        Stream of obspy traces containing seismic data and header information.
    :type st: :class: 'obspy.Stream'
    :param st: An obspy.Stream object containing all the data available and\
        required for the correlations with templates given.  For efficiency\
        this should contain no excess traces which are not in one or more of\
        the templates.  This will now remove excess traces internally, but\
        will copy the stream and work on the copy, leaving your input stream\
        untouched.
    :type threshold: float
    :param threshold: A threshold value set based on the threshold_type
    :type threshold_type: str
    :param threshold_type: The type of threshold to be used, can be MAD,\
        absolute or av_chan_corr.    MAD threshold is calculated as the\
        threshold*(median(abs(cccsum))) where cccsum is the cross-correlation\
        sum for a given template. absolute threshold is a true absolute\
        threshold based on the cccsum value av_chan_corr is based on the mean\
        values of single-channel cross-correlations assuming all data are\
        present as required for the template, \
        e.g. av_chan_corr_thresh=threshold*(cccsum/len(template)) where\
        template is a single template from the input and the length is the\
        number of channels within this template.
    :type trig_int: float
    :param trig_int: Minimum gap between detections in seconds.
    :type plotvar: bool
    :param plotvar: Turn plotting on or off
    :type plotdir: str
    :param plotdir: Path to plotting folder, plots will be output here,\
        defaults to run location.
    :type tempdir: String or False
    :param tempdir: Directory to put temporary files, or False
    :type cores: int
    :param cores: Number of cores to use
    :type debug: int
    :param debug: Debug output level, the bigger the number, the more the\
        output.

    :return: :class: 'DETECTIONS' detections for each channel formatted as\
    :class: 'obspy.UTCDateTime' objects.

    .. rubric:: Note
        Plotting within the match-filter routine uses the Agg backend with\
        interactive plotting turned off.  This is because the function is\
        designed to work in bulk.  If you wish to turn interactive plotting on\
        you must import matplotlib in your script first, when you then import\
        match_filter you will get the warning that this call to matplotlib has\
        no effect, which will mean that match_filter has not changed the\
        plotting behaviour.
    """
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    plt.ioff()
    import copy
    from eqcorrscan.utils import EQcorrscan_plotting
    from eqcorrscan.utils import findpeaks
    from obspy import Trace
    import time

    # Copy the stream here because we will modify it in place
    stream = st.copy()
    templates = copy.deepcopy(template_list)
    # Debug option to confirm that the channel names match those in the
    # templates
    if debug >= 2:
        template_stachan = []
        data_stachan = []
        for template in templates:
            for tr in template:
                template_stachan.append(tr.stats.station + '.' +
                                        tr.stats.channel)
        for tr in stream:
            data_stachan.append(tr.stats.station + '.' + tr.stats.channel)
        template_stachan = list(set(template_stachan))
        data_stachan = list(set(data_stachan))
        if debug >= 3:
            print('I have template info for these stations:')
            print(template_stachan)
            print('I have daylong data for these stations:')
            print(data_stachan)
    # Perform a check that the daylong vectors are daylong
    for tr in stream:
        if not tr.stats.sampling_rate * 86400 == tr.stats.npts:
            msg = ' '.join(['Data are not daylong for', tr.stats.station,
                            tr.stats.channel])
            raise ValueError(msg)
    # Call the _template_loop function to do all the correlation work
    outtic = time.time()
    # Edit here from previous, stable, but slow match_filter
    # Would be worth testing without an if statement: give every station in
    # the possible template stations data, and for those without real data
    # make the data NaN so that they return a NaN ccc_sum
    # Note: this works
    if debug >= 2:
        print('Ensuring all template channels have matches in daylong data')
    template_stachan = []
    for template in templates:
        for tr in template:
            template_stachan += [(tr.stats.station, tr.stats.channel)]
    template_stachan = list(set(template_stachan))
    # Copy this here to keep it safe
    for stachan in template_stachan:
        if not stream.select(station=stachan[0], channel=stachan[1]):
            # Remove template traces rather than adding NaN data
            for template in templates:
                if template.select(station=stachan[0], channel=stachan[1]):
                    for tr in template.select(station=stachan[0],
                                              channel=stachan[1]):
                        template.remove(tr)
    # Remove un-needed channels
    for tr in stream:
        if not (tr.stats.station, tr.stats.channel) in template_stachan:
            stream.remove(tr)
    # Also pad out templates to have all channels
    for template in templates:
        for stachan in template_stachan:
            if not template.select(station=stachan[0], channel=stachan[1]):
                nulltrace = Trace()
                nulltrace.stats.station = stachan[0]
                nulltrace.stats.channel = stachan[1]
                nulltrace.stats.sampling_rate = template[0].stats.sampling_rate
                nulltrace.stats.starttime = template[0].stats.starttime
                nulltrace.data = np.array([np.nan] * len(template[0].data),
                                          dtype=np.float32)
                template += nulltrace
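    # Padding with NaN traces keeps every template carrying the same set of
    # station/channel pairs, so _channel_loop sees a consistent channel
    # layout for each template (NaN data flag channels with no real data).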
    if debug >= 2:
        print('Starting the correlation run for this day')
    [cccsums, no_chans] = _channel_loop(templates, stream, cores, debug)
    if len(cccsums[0]) == 0:
        raise ValueError('Correlation has not run, zero length cccsum')
    outtoc = time.time()
    print(' '.join(['Looping over templates and streams took:',
                    str(outtoc - outtic), 's']))
    if debug >= 2:
        print(' '.join(['The shape of the returned cccsums is:',
                        str(np.shape(cccsums))]))
        print(' '.join(['This is from', str(len(templates)), 'templates']))
        print(' '.join(['Correlated with', str(len(stream)),
                        'channels of data']))
    detections = []
    for i, cccsum in enumerate(cccsums):
        template = templates[i]
        if threshold_type == 'MAD':
            rawthresh = threshold * np.median(np.abs(cccsum))
        elif threshold_type == 'absolute':
            rawthresh = threshold
        elif threshold_type == 'av_chan_corr':
            # scale by the number of channels contributing to this cccsum
            rawthresh = threshold * no_chans[i]
        else:
            print('You have not selected the correct threshold type, I will ' +
                  'use MAD as I like it')
            rawthresh = threshold * np.median(np.abs(cccsum))
        # Findpeaks returns a list of tuples in the form [(cccsum, sample)]
        print(' '.join(['Threshold is set at:', str(rawthresh)]))
        print(' '.join(['Max of data is:', str(max(cccsum))]))
        print(' '.join(['Mean of data is:', str(np.mean(cccsum))]))
        if np.abs(np.mean(cccsum)) > 0.05:
            warnings.warn('Mean is not zero!  Check this!')
        # Set up a trace object for the cccsum as this is easier to plot and
        # maintains timing
        if plotvar:
            stream_plot = copy.deepcopy(stream[0])
            # Downsample for plotting
            stream_plot.decimate(int(stream[0].stats.sampling_rate / 10))
            cccsum_plot = Trace(cccsum)
            cccsum_plot.stats.sampling_rate = stream[0].stats.sampling_rate
            # Resample here to maintain shape better
            cccsum_hist = cccsum_plot.copy()
            cccsum_hist = cccsum_hist.decimate(int(stream[0].stats.sampling_rate /
                                                   10)).data
            cccsum_plot = EQcorrscan_plotting.chunk_data(cccsum_plot, 10,
                                                         'Maxabs').data
            # Enforce same length
            stream_plot.data = stream_plot.data[0:len(cccsum_plot)]
            cccsum_plot = cccsum_plot[0:len(stream_plot.data)]
            cccsum_hist = cccsum_hist[0:len(stream_plot.data)]
            EQcorrscan_plotting.triple_plot(cccsum_plot, cccsum_hist,
                                            stream_plot, rawthresh, True,
                                            plotdir + '/cccsum_plot_' +
                                            template_names[i] + '_' +
                                            stream[0].stats.starttime.datetime.strftime('%Y-%m-%d') +
                                            '.' + plot_format)
            if debug >= 4:
                print(' '.join(['Saved the cccsum to:', template_names[i],
                                stream[0].stats.starttime.datetime.strftime('%Y%j')]))
                np.save(template_names[i] +
                        stream[0].stats.starttime.datetime.strftime('%Y%j'),
                        cccsum)
        tic = time.time()
        if debug >= 4:
            np.save('cccsum_' + str(i) + '.npy', cccsum)
        if debug >= 3 and max(cccsum) > rawthresh:
            peaks = findpeaks.find_peaks2_short(cccsum, rawthresh,
                                                trig_int * stream[0].stats.sampling_rate,
                                                debug,
                                                stream[0].stats.starttime,
                                                stream[0].stats.sampling_rate)
        elif max(cccsum) > rawthresh:
            peaks = findpeaks.find_peaks2_short(cccsum, rawthresh,
                                                trig_int * stream[0].stats.sampling_rate,
                                                debug)
        else:
            print('No peaks found above threshold')
            peaks = False
        toc = time.time()
        if debug >= 1:
            print(' '.join(['Finding peaks took:', str(toc - tic), 's']))
        if peaks:
            for peak in peaks:
                detecttime = stream[0].stats.starttime +\
                    peak[1] / stream[0].stats.sampling_rate
                detections.append(DETECTION(template_names[i],
                                            detecttime,
                                            no_chans[i], peak[0], rawthresh,
                                            'corr'))
    del stream, templates
    return detections
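
# --- Editor's illustrative sketch (not part of the original example) ---
# Shows how the threshold_type options documented in match_filter translate
# into a raw threshold on the cross-correlation sum.  The helper name and the
# inputs are hypothetical; match_filter does this inline per template.
def _example_raw_threshold(cccsum, threshold, threshold_type, n_chans):
    import numpy as np
    if threshold_type == 'MAD':
        # threshold times the median absolute value of the daylong cccsum
        return threshold * np.median(np.abs(cccsum))
    elif threshold_type == 'absolute':
        # threshold is used directly as a value of the cccsum
        return threshold
    elif threshold_type == 'av_chan_corr':
        # threshold is a mean single-channel correlation, scaled up by the
        # number of channels contributing to the sum
        return threshold * n_chans
    raise ValueError('Unknown threshold_type: %s' % threshold_type)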
Beispiel #49
0
def synth_compare(stream, stream_list, cores=4, debug=0):
    r"""Compare a specific stream to a list of synthetic templates, or \
    earthquakes of known source and find the best matching event.

    :type stream: :class:obspy.Stream
    :param stream: Stream to be compared to streams with known locations.
    :type stream_list: list
    :param stream_list: List of streams with known locations
    :type cores: int
    :param cores: Number of cores to parallel over
    :type debug: int
    :param debug: Debug level, high is more debug

    :returns: int, float: index of best match and cross-correlation sum
    """

    from eqcorrscan.core.match_filter import _channel_loop
    import numpy as np
    import copy
    from obspy import Trace

    stream_copy = stream.copy()
    templates = copy.deepcopy(stream_list)
    # Need to fill the stream_list - template - channels
    template_stachan = []
    for template in templates:
        for tr in template:
            template_stachan += [(tr.stats.station, tr.stats.channel)]
    template_stachan = list(set(template_stachan))

    for stachan in template_stachan:
        if not stream_copy.select(station=stachan[0], channel=stachan[1]):
            # Remove template traces rather than adding NaN data
            for template in templates:
                if template.select(station=stachan[0], channel=stachan[1]):
                    for tr in template.select(station=stachan[0],
                                              channel=stachan[1]):
                        template.remove(tr)
    # Remove un-needed channels
    for tr in stream_copy:
        if not (tr.stats.station, tr.stats.channel) in template_stachan:
            stream_copy.remove(tr)
    # Also pad out templates to have all channels
    for template in templates:
        for stachan in template_stachan:
            if not template.select(station=stachan[0], channel=stachan[1]):
                nulltrace = Trace()
                nulltrace.stats.station = stachan[0]
                nulltrace.stats.channel = stachan[1]
                nulltrace.stats.sampling_rate = template[0].stats.sampling_rate
                nulltrace.stats.starttime = template[0].stats.starttime
                nulltrace.data = np.array([np.nan] * len(template[0].data),
                                          dtype=np.float32)
                template += nulltrace
    # Hand off cross-correlation to _channel_loop, which runs in parallel
    [cccsums, no_chans] = _channel_loop(templates, stream_copy, cores, debug)
    cccsums = [np.max(cccsum) for cccsum in cccsums]
    # Find the maximum cccsum and index thereof
    index = np.argmax(cccsums)
    cccsum = cccsums[index]
    return index, cccsum
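
# Editor's usage sketch (hypothetical paths and file names): compare one
# picked event stream against a list of candidate synthetic/template streams
# and report the best match.
# from obspy import read
# stream = read('/path/to/event.ms')
# stream_list = [read('/path/to/synth_%03i.ms' % i) for i in range(10)]
# best_index, best_cccsum = synth_compare(stream, stream_list, cores=4, debug=0)
# print('Best matching template: %i (cccsum %.2f)' % (best_index, best_cccsum))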
Beispiel #50
0
def template_grid(stations, nodes, travel_times, phase, PS_ratio=1.68,
                  samp_rate=100, flength=False, phaseout='all'):
    """
    Function to generate a group of synthetic seismograms to simulate phase
    arrivals from a grid of known sources in a three-dimensional model.  Lags
    must be known and supplied; these can be generated with the bright_lights
    function read_tt, and resampled to fit the desired grid dimensions and
    spacing using other functions therein.  These synthetic seismograms are very
    simple models of seismograms using the seis_sim function herein.  These
    approximate body-wave P and S first arrivals as spikes convolved with damped
    sine waves.

    :type stations: List
    :param stations: List of the station names
    :type nodes: list of tuple
    :param nodes: List of node locations in (lon,lat,depth)
    :type travel_times: np.ndarray
    :param travel_times: Array of travel times where travel_times[i][:] refers \
        to the travel times for station=stations[i], and travel_times[i][j] \
        refers to stations[i] for nodes[j]
    :type phase: String
    :param phase: Can be either 'P' or 'S'
    :type PS_ratio: float
    :param PS_ratio: P/S velocity ratio, defaults to 1.68
    :type samp_rate: float
    :param samp_rate: Desired sample rate in Hz, defaults to 100.0
    :type flength: int
    :param flength: Length of template in samples, defaults to False
    :type phaseout: str
    :param phaseout: Either 'S', 'P', 'all' or 'both', determines which phases \
            to clip around.  'all' encompasses both phases in one channel, but\
            will return nothing if the flength is not long enough; 'both' will\
            return two channels for each station, one SYN_Z with the synthetic\
            P-phase, and one SYN_H with the synthetic S-phase.

    :returns: List of :class:obspy.Stream
    """
    import warnings
    if phase not in ['S', 'P']:
        raise IOError('Phase is neither P nor S')
    from obspy import Stream, Trace
    #Initialize empty list for templates
    templates=[]
    # Loop through the nodes, for every node generate a template!
    for i, node in enumerate(nodes):
        st=[] # Empty list to be filled with synthetics
        # Loop through stations
        for j, station in enumerate(stations):
            tr=Trace()
            tr.stats.sampling_rate=samp_rate
            tr.stats.station=station
            tr.stats.channel='SYN'
            tt=travel_times[j][i]
            if phase=='P':
                # If the input travel-time is the P-wave travel-time
                SP_time=(tt*PS_ratio)-tt
                if phaseout=='S':
                    tr.stats.starttime+=tt+SP_time
                else:
                    tr.stats.starttime+=tt
            elif phase=='S':
                # If the input travel-time is the S-wave travel-time
                SP_time=tt-(tt/PS_ratio)
                if phaseout=='S':
                    tr.stats.starttime+=tt
                else:
                    tr.stats.starttime+=tt-SP_time
            else:
                raise IOError('Input grid is not P or S')
            # Set start-time of trace to be travel-time for P-wave
            # Check that the template length is long enough to include the SP
            # if SP_time*samp_rate > flength-11:
            #     print 'No template for '+station
            #     print 'Travel-time is :'+str(tt)
            #     print node
            #     print 'SP time is: '+str(SP_time)
            #     #warnings.warn('Cannot make this template, SP-time '+str(SP_time)+\
            #                     # ' longer than length: '+str(flength/samp_rate))
            #     continue
            if SP_time*samp_rate < flength-11 and phaseout=='all':
                tr.data=seis_sim(SP=int(SP_time*samp_rate), amp_ratio=1.5,\
                                flength=flength, phaseout=phaseout)
                st.append(tr)
            elif phaseout=='all':
                warnings.warn('Cannot make a bulk synthetic with this fixed '+\
                              'length for station '+station)
            elif phaseout in ['P','S']:
                tr.data=seis_sim(SP=int(SP_time*samp_rate), amp_ratio=1.5,\
                                flength=flength, phaseout=phaseout)
                st.append(tr)
            elif phaseout == 'both':
                for _phaseout in ['P', 'S']:
                    _tr=tr.copy()
                    _tr.data=seis_sim(SP=int(SP_time*samp_rate), amp_ratio=1.5,\
                                flength=flength, phaseout=_phaseout)
                    if _phaseout=='P':
                        _tr.stats.channel='SYN_Z'
                        # starttime defaults to S-time
                        _tr.stats.starttime=_tr.stats.starttime-SP_time
                    elif _phaseout=='S':
                        _tr.stats.channel='SYN_H'
                    st.append(_tr)
        templates.append(Stream(st))
        # Stream(st).plot(size=(800,600))
    return templates
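
# Editor's usage sketch (hypothetical stations, nodes and travel times):
# travel_times[i][j] is the travel time for stations[i] to nodes[j], so a
# two-station, three-node grid looks like this.
# import numpy as np
# stations = ['STA1', 'STA2']
# nodes = [(-77.0, -12.0, 10.0), (-77.1, -12.0, 12.0), (-77.2, -12.1, 15.0)]
# travel_times = np.array([[4.2, 4.6, 5.1],
#                          [3.8, 4.1, 4.7]])
# templates = template_grid(stations, nodes, travel_times, phase='P',
#                           samp_rate=100.0, flength=400, phaseout='all')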
Beispiel #51
0
def stochastic_simulation(home,project_name,rupture_name,GF_list,time_epi,model_name,
        rise_time_depths,moho_depth_in_km,total_duration=100,hf_dt=0.01,stress_parameter=50,
        kappa=0.04,Qexp=0.6,component='N',Pwave=False): 
    '''
    Run stochastic HF sims
    
    stress parameter is in bars
    '''
    
    from numpy import genfromtxt,pi,logspace,log10,mean,where,exp,arange,zeros,argmin,rad2deg,arctan2
    from pyproj import Geod
    from obspy.geodetics import kilometer2degrees
    from obspy.taup import taup_create,TauPyModel
    from mudpy.forward import get_mu
    from obspy import Stream,Trace
    from matplotlib import pyplot as plt
    
    # Initialize output object
    st=Stream()
    
    #Load the source
    fault=genfromtxt(home+project_name+'/output/ruptures/'+rupture_name)    
    
    #Onset times for each subfault
    onset_times=fault[:,12]
    
    #Load stations
    sta=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=[0],dtype='U')
    lonlat=genfromtxt(home+project_name+'/data/station_info/'+GF_list,usecols=[1,2])
    
    #load velocity structure
    structure=genfromtxt(home+project_name+'/structure/'+model_name)
    
    #Frequencies vector
    f=logspace(log10(hf_dt),log10(1/(2*hf_dt))+0.01,50)
    omega=2*pi*f
    
    #Output time vector (0 is origin time)
    t=arange(0,total_duration,hf_dt)
    
    #Projection object for distance calculations
    g=Geod(ellps='WGS84')
    
    #Create taup velocity model object, paste on top of iaspei91
    #taup_create.build_taup_model(home+project_name+'/structure/bbp_norcal.tvel',output_folder=home+project_name+'/structure/')
    velmod=TauPyModel(model=home+project_name+'/structure/bbp_norcal',verbose=True)
    
    #Moments
    slip=(fault[:,8]**2+fault[:,9]**2)**0.5
    subfault_M0=slip*fault[:,10]*fault[:,11]*fault[:,13]
    subfault_M0=subfault_M0*1e7 #to dyne-cm
    M0=subfault_M0.sum()
    relative_subfault_M0=subfault_M0/M0
    
    #Corner frequency scaling
    i=where(slip>0)[0] #Non-zero faults
    N=len(i) #number of subfaults
    dl=mean((fault[:,10]+fault[:,11])/2) #predominant length scale
    dl=dl/1000 # to km
    
    # Frankel 95 scaling of corner frequency #verified this looks the same in GP
    # Right now this applies the same factor to all faults
    # Move inside the loop with right dl????
    fc_scale=(M0)/(N*stress_parameter*dl**3*1e21) #Frankel scaling
    
    #Move this inside the loop?
    small_event_M0 = stress_parameter*dl**3*1e21
    
    #Tau-p perturbation (small depth offset used if ray tracing fails)
    tau_perturb=0.1
    
    #Loop over stations
    for ksta in range(len(lonlat)):
    
        print('... working on '+component+' component semistochastic waveform for station '+sta[ksta])
    
        #initalize output seismogram
        tr=Trace()
        tr.stats.station=sta[ksta]
        tr.stats.delta=hf_dt
        tr.stats.starttime=time_epi
        hf=zeros(len(t))
        
        #Loop over subfaults
        for kfault in range(len(fault)):
            
            #Include only subfaults with non-zero slip
            if subfault_M0[kfault]>0:
            
                #Get subfault to station distance
                lon_source=fault[kfault,1]
                lat_source=fault[kfault,2]
                azimuth,baz,dist=g.inv(lon_source,lat_source,lonlat[ksta,0],lonlat[ksta,1])
                dist_in_degs=kilometer2degrees(dist/1000.)
                
                #Get rho, alpha, beta at subfault depth
                zs=fault[kfault,3]
                mu,alpha,beta=get_mu(structure,zs,return_speeds=True)
                rho=mu/beta**2
                
                #Get radiation scale factor
                Spartition=1/2**0.5
                if component=='N' :
                    component_angle=0
                elif component=='E':
                    component_angle=90
                
                rho=rho/1000 #to g/cm**3
                beta=(beta/1000)*1e5 #to cm/s
                alpha=(alpha/1000)*1e5
                
                #Verified this produces same value as in GP
                CS=(2*Spartition)/(4*pi*(rho)*(beta**3))
                CP=2/(4*pi*(rho)*(alpha**3))
                
                #Get local subfault rupture speed
                beta=beta/100 #to m/s
                vr=get_local_rupture_speed(zs,beta,rise_time_depths)
                vr=vr/1000 #to km/s
                dip_factor=get_dip_factor(fault[kfault,5],fault[kfault,8],fault[kfault,9])
                
                #Subfault corner frequency
                c0=2.0 #GP2015 value
                fc_subfault=(c0*vr)/(dip_factor*pi*dl)
                
                #get subfault source spectrum
                #S=((relative_subfault_M0[kfault]*M0/N)*f**2)/(1+fc_scale*(f/fc_subfault)**2)
                S=small_event_M0*(omega**2/(1+(f/fc_subfault)**2))
                frankel_conv_operator= fc_scale*((fc_subfault**2+f**2)/(fc_subfault**2+fc_scale*f**2))
                S=S*frankel_conv_operator
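                # Note (editor): at low frequency this operator tends to
                # fc_scale, lifting the subevent spectrum to the mainshock
                # moment level (N*small_event_M0*fc_scale = M0), while at
                # high frequency it tends to 1, preserving the small-event
                # high-frequency level, as in the Frankel (1995) scaling.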
                
                #get high frequency decay
                P=exp(-pi*kappa*f)
                
                #get quarter-wavelength amplification factors
                # pass rho in kg/m^3 (this units nightmare is what I get for following Graves' code)
                I=get_amplification_factors(f,structure,zs,beta,rho*1000)
                
                #Get other geometric parameters necessary for radiation pattern
                strike=fault[kfault,4]
                dip=fault[kfault,5]
                ss=fault[kfault,8]
                ds=fault[kfault,9]
                rake=rad2deg(arctan2(ds,ss))
                
                #Get ray paths for all direct P arrivals
                Ppaths=velmod.get_ray_paths(zs,dist_in_degs,phase_list=['P','p'])
                
                #Get ray paths for all direct S arrivals
                try:
                    Spaths=velmod.get_ray_paths(zs,dist_in_degs,phase_list=['S','s'])
                except:
                    Spaths=velmod.get_ray_paths(zs+tau_perturb,dist_in_degs,phase_list=['S','s'])

                #Get direct s path and moho reflection
                mohoS=None
                directS=Spaths[0]
                directP=Ppaths[0]
                #print len(Spaths)
                if len(Spaths)==1: #only direct S
                    pass
                else:
                    #turn_depth=zeros(len(Spaths)-1) #turning depth of other non-direct rays
                    #for k in range(1,len(Spaths)):
                    #    turn_depth[k-1]=Spaths[k].path['depth'].max()
                    ##If there's a ray that turns within 2km of Moho, callt hat guy the Moho reflection
                    #deltaz=abs(turn_depth-moho_depth_in_km)
                    #i=argmin(deltaz)
                    #if deltaz[i]<2: #Yes, this is a moho reflection
                    #    mohoS=Spaths[i+1]
                    #else:
                    #    mohoS=None
                    mohoS=Spaths[-1]
                     
 
                #######         Build Direct P ray           ######
                if Pwave:
                    take_off_angle_P=directP.takeoff_angle
                    
                    #Get attenuation due to geometrical spreading (from the path length)
                    path_length_P=get_path_length(directP,zs,dist_in_degs)
                    path_length_P=path_length_P*100 #to cm
                    
                    #Get effect of intrinsic attenuation for that ray (path integrated)
                    Q_P=get_attenuation(f,structure,directS,Qexp,Qtype='P')
                    
                    #Build the entire path term
                    G_P=(I*Q_P)/path_length_P
    
                    #Get conically averaged radiation pattern terms
                    RP=conically_avg_P_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_P)
                    RP=abs(RP)
                       
                    #Get partition of Pwave into Z and N,E components 
                    incidence_angle=directP.incident_angle
                    Npartition,Epartition,Zpartition=get_P_wave_partition(incidence_angle,azimuth)
                    if component=='Z':
                       Ppartition=Zpartition 
                    elif component=='N':
                        Ppartition=Npartition
                    else:
                        Ppartition=Epartition
                        
                    #And finally multiply everything together to get the subfault amplitude spectrum
                    AP=CP*S*G_P*P*RP*Ppartition           
    
                    #Generate windowed time series
                    duration=1./fc_subfault+0.09*(dist/1000)
                    w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
                    
                    #Go to frequency domain, apply amplitude spectrum and ifft for final time series
                    hf_seis_P=apply_spectrum(w,AP,f,hf_dt)
                    
                    #What time after OT should this time series start at?
                    time_insert=directP.path['time'][-1]+onset_times[kfault]
                    i=argmin(abs(t-time_insert))
                    j=i+len(hf_seis_P)
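                    # Note (editor): this assumes the windowed series fits
                    # inside the output vector; if time_insert plus the window
                    # duration exceeds total_duration, hf[i:j] would be shorter
                    # than hf_seis_P and the addition below would fail.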
                    
                    #Add seismogram
                    hf[i:j]=hf[i:j]+hf_seis_P                    
                                               
                                                                      
                                                                                                                    
                              
                #######         Build Direct S ray           ######
                take_off_angle_S=directS.takeoff_angle
                
                #Get attenuation due to geometrical spreading (from the path length)
                path_length_S=get_path_length(directS,zs,dist_in_degs)
                path_length_S=path_length_S*100 #to cm
                
                #Get effect of intrinsic attenuation for that ray (path integrated)
                Q_S=get_attenuation(f,structure,directS,Qexp)
                
                #Build the entire path term
                G_S=(I*Q_S)/path_length_S

                #Get conically averaged radiation pattern terms
                if component=='Z':
                    RP_vert=conically_avg_vert_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_S)
                    #And finally multiply everything together to get the subfault amplitude spectrum
                    AS=CS*S*G_S*P*RP_vert   
                else:
                    RP=conically_avg_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_S,component_angle)
                    RP=abs(RP)
                    #And finally multiply everything together to get the subfault amplitude spectrum
                    AS=CS*S*G_S*P*RP                

                #Generate windowed time series
                duration=1./fc_subfault+0.063*(dist/1000)
                w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
                #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])
                
                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
                hf_seis_S=apply_spectrum(w,AS,f,hf_dt)
                
                #What time after OT should this time series start at?
                time_insert=directS.path['time'][-1]+onset_times[kfault]
                #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
                #time_insert=Ppaths[0].path['time'][-1]
                i=argmin(abs(t-time_insert))
                j=i+len(hf_seis_S)
                
                #Add seismogram
                hf[i:j]=hf[i:j]+hf_seis_S
                
                
                #######         Build Moho reflected S ray           ######
    #            if mohoS==None:
    #                pass
    #            else:
    #                if kfault%100==0:
    #                    print '... ... building Moho reflected S wave'
    #                take_off_angle_mS=mohoS.takeoff_angle
    #                
    #                #Get attenuation due to geometrical spreading (from the path length)
    #                path_length_mS=get_path_length(mohoS,zs,dist_in_degs)
    #                path_length_mS=path_length_mS*100 #to cm
    #                
    #                #Get effect of intrinsic aptimeenuation for that ray (path integrated)
    #                Q_mS=get_attenuation(f,structure,mohoS,Qexp)
    #                
    #                #Build the entire path term
    #                G_mS=(I*Q_mS)/path_length_mS
    #
    #                #Get conically averaged radiation pattern terms
    #                if component=='Z':
    #                    RP_vert=conically_avg_vert_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS)
    #                    #And finally multiply everything together to get the subfault amplitude spectrum
    #                    A=C*S*G_mS*P*RP_vert   
    #                else:
    #                    RP=conically_avg_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS,component_angle)
    #                    RP=abs(RP)
    #                    #And finally multiply everything together to get the subfault amplitude spectrum
    #                    A=C*S*G_mS*P*RP                
    #
    #                #Generate windowed time series
    #                duration=1./fc_subfault+0.063*(dist/1000)
    #                w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
    #                #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])
    #                
    #                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
    #                hf_seis=apply_spectrum(w,A,f,hf_dt)
    #                
    #                #What time after OT should this time series start at?
    #                time_insert=mohoS.path['time'][-1]+onset_times[kfault]
    #                #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
    #                #time_insert=Ppaths[0].path['time'][-1]
    #                i=argmin(abs(t-time_insert))
    #                j=i+len(hf_seis)
    #                
    #                #Add seismogram
    #                hf[i:j]=hf[i:j]+hf_seis
    #                
    #                #Done, reset
    #                mohoS=None
                
        #Done, add to trace and stream
        tr.data=hf/100 #convert to m/s**2
        st+=tr
    
    return st
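
# Editor's usage sketch (hypothetical project layout and parameter values):
# from obspy import UTCDateTime
# st_hf = stochastic_simulation(home='/data/fakequakes/', project_name='my_project',
#                               rupture_name='run.000000.rupt', GF_list='stations.gflist',
#                               time_epi=UTCDateTime(2016, 9, 1, 0, 0, 0),
#                               model_name='velocity.mod',
#                               rise_time_depths=[10, 15], moho_depth_in_km=30.0,
#                               total_duration=200, hf_dt=0.01, stress_parameter=50,
#                               kappa=0.04, Qexp=0.6, component='N', Pwave=True)
# st_hf.write('hf_waveforms.mseed', format='MSEED')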