Example #1
def corr_cluster(trace_list, thresh=0.9):
    """
    Group traces based on correlations above threshold with the stack.

    Runs twice: first with a lower threshold to remove large outliers,
    then again with your given threshold.

    :type trace_list: list of :class:`obspy.Trace`
    :param trace_list: Traces to compute similarity between
    :type thresh: float
    :param thresh: Correlation threshold between -1 and 1

    :returns: np.ndarray of bool
    """
    import numpy as np
    from eqcorrscan.utils import stacking
    from obspy import Stream
    from eqcorrscan.core.match_filter import normxcorr2
    stack = stacking.linstack([Stream(tr) for tr in trace_list])[0]
    output = np.array([False] * len(trace_list))
    group1 = []
    for i, tr in enumerate(trace_list):
        if normxcorr2(tr.data, stack.data)[0][0] > 0.6:
            output[i] = True
            group1.append(tr)
    stack = stacking.linstack([Stream(tr) for tr in group1])[0]
    group2 = []
    for i, tr in enumerate(trace_list):
        if normxcorr2(tr.data, stack.data)[0][0] > thresh:
            group2.append(tr)
            output[i] = True
        else:
            output[i] = False
    return output
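
A minimal usage sketch (the synthetic data, seed, and sizes below are illustrative assumptions; real traces should be aligned first, e.g. on the P arrival):

import numpy as np
from obspy import Trace

rng = np.random.default_rng(42)
base = rng.standard_normal(200)
# Nine similar traces plus one pure-noise outlier
trace_list = [Trace(data=base + 0.1 * rng.standard_normal(200))
              for _ in range(9)]
trace_list.append(Trace(data=rng.standard_normal(200)))
mask = corr_cluster(trace_list, thresh=0.9)
# Expected: mask is True for the similar traces, False for the outlier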
Example #2
    def test_linstack(self):
        """Test the utils.stacking.linstack function."""
        # Generate synth data
        import numpy as np
        from obspy import Stream, Trace
        from eqcorrscan.utils.stacking import linstack

        synth = Stream(Trace())
        synth[0].data = np.zeros(200)
        synth[0].data[100] = 1.0
        sine_x = np.arange(0, 10.0, 0.5)
        damped_sine = np.exp(-sine_x) * np.sin(2 * np.pi * sine_x)
        synth[0].data = np.convolve(synth[0].data, damped_sine)
        # Normalize:
        synth[0].data = synth[0].data / synth[0].data.max()
        maximum_synth = synth[0].data.max()
        RMS_max = np.sqrt(np.mean(np.square(synth[0].data)))
        streams = [synth.copy() for _ in range(10)]

        stack = linstack(streams, normalize=True)
        # Check normalized amplitude is correct
        self.assertEqual(np.float32(stack[0].data.max()),
                         np.float32(10 * maximum_synth / RMS_max))
        stack = linstack(streams, normalize=False)
        # Check amplitude is preserved
        self.assertEqual(stack[0].data.max(), 10 * maximum_synth)
        # Check length is preserved
        self.assertEqual(len(synth[0].data), len(stack[0].data))
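
The asserted amplitude follows from the normalization: with normalize=True each trace is divided by its RMS before summing, so ten identical copies stack to 10 * maximum_synth / RMS_max, as the first assertion checks. A standalone check of the same arithmetic (plain numpy, mirroring the test's synthetic data):

import numpy as np

spike = np.zeros(200)
spike[100] = 1.0
sine_x = np.arange(0, 10.0, 0.5)
data = np.convolve(spike, np.exp(-sine_x) * np.sin(2 * np.pi * sine_x))
data = data / data.max()
rms = np.sqrt(np.mean(np.square(data)))
stack = sum(data / rms for _ in range(10))  # ten RMS-normalized copies
assert np.isclose(stack.max(), 10 * data.max() / rms)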
Example #3
 def test_linstack(self):
     """Test the utils.stacking.linstack function."""
     stack = linstack(self.streams, normalize=True)
     # Check normalized amplitude is correct
     self.assertEqual(np.float32(stack[0].data.max()),
                      np.float32(10 * self.maximum_synth / self.RMS_max))
     stack = linstack(self.streams, normalize=False)
     # Check amplitude is preserved
     self.assertEqual(stack[0].data.max(), 10 * self.maximum_synth)
     # Check length is preserved
     self.assertEqual(len(self.synth[0].data), len(stack[0].data))
Example #4
def corr_cluster(trace_list, thresh=0.9):
    """
    Group traces based on correlations above threshold with the stack.

    Will run twice, once with 80% of your threshold to remove large
    outliers that would negatively affect the stack, then again with your
    threshold.

    :type trace_list: list
    :param trace_list:
        List of :class:`obspy.core.stream.Trace` to compute similarity between
    :type thresh: float
    :param thresh: Correlation threshold between -1 and 1

    :returns:
        :class:`numpy.ndarray` of bool of whether that trace correlates well
        enough (above your given threshold) with the stack.

    .. note::
        We recommend that you align the data before computing the clustering,
        e.g., the P-arrival on all templates for the same channel should
        appear at the same time in the trace.  See the
        :func:`eqcorrscan.utils.stacking.align_traces` function for a way to do
        this.
    """
    init_thresh = thresh * .8
    stack = stacking.linstack([Stream(tr) for tr in trace_list])[0]
    output = np.array([False] * len(trace_list))
    group1 = []
    array_xcorr = get_array_xcorr()
    for i, tr in enumerate(trace_list):
        cc = array_xcorr(np.array([tr.data]), stack.data, [0])[0][0][0]
        if cc > init_thresh:
            output[i] = True
            group1.append(tr)
    if len(group1) == 0:
        Logger.warning('Nothing made it past the first 80% threshold')
        return output
    stack = stacking.linstack([Stream(tr) for tr in group1])[0]
    group2 = []
    for i, tr in enumerate(trace_list):
        if array_xcorr(
                np.array([tr.data]), stack.data, [0])[0][0][0] > thresh:
            group2.append(tr)
            output[i] = True
        else:
            output[i] = False
    return output
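
get_array_xcorr is imported from eqcorrscan.utils.correlate; called with no argument it returns the default correlation backend, while a name selects one explicitly. A sketch (which backends are available depends on your install):

from eqcorrscan.utils.correlate import get_array_xcorr

xcorr = get_array_xcorr()  # default backend (usually the compiled 'fftw')
xcorr_np = get_array_xcorr("numpy")  # pure-numpy fallback backend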
Example #5
def empirical_SVD(stream_list, linear=True):
    """
    Empirical subspace detector generation function.  Takes a list of
    templates and computes the stack as the first order subspace detector,
    and the differential of this as the second order subspace detector
    following the empirical subspace method of Barrett & Beroza, 2014 - SRL.

    :type stream_list: list of stream
    :param stream_list: list of template streams to compute the subspace detectors\
        from
    :type linear: bool
    :param linear: Set to true by default to compute the linear stack as the\
        first subspace vector, False will use the phase-weighted stack as the\
        first subspace vector.

    :returns: list of two streams
    """
    import numpy as np
    from eqcorrscan.utils import stacking
    if linear:
        first_subspace = stacking.linstack(stream_list)
    else:
        first_subspace = stacking.PWS_stack(streams=stream_list)
    second_subspace = first_subspace.copy()
    for i in range(len(second_subspace)):
        second_subspace[i].data = np.diff(second_subspace[i].data)
        second_subspace[i].stats.starttime += (
            0.5 * second_subspace[i].stats.delta)
    return [first_subspace, second_subspace]
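
The half-sample starttime shift exists because np.diff approximates the derivative between samples: element k of the diff sits at time (k + 0.5) * delta, and the output is one sample shorter. A quick illustration:

import numpy as np

x = np.array([0.0, 1.0, 4.0, 9.0])  # samples at t = 0, 1, 2, 3
print(np.diff(x))  # [1. 3. 5.] - centred at t = 0.5, 1.5, 2.5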
Example #6
def empirical_SVD(stream_list, linear=True):
    """
    Empirical subspace detector generation function.

    Takes a list of \
    templates and computes the stack as the first order subspace detector, \
    and the differential of this as the second order subspace detector \
    following the empirical subspace method of
    `Barrett & Beroza, 2014 - SRL
    <http://srl.geoscienceworld.org/content/85/3/594.extract>`_.

    :type stream_list: list
    :param stream_list:
        list of streams to compute the subspace detectors from, where streams
        are :class:`obspy.core.stream.Stream` objects.
    :type linear: bool
    :param linear: Set to true by default to compute the linear stack as the \
        first subspace vector, False will use the phase-weighted stack as the \
        first subspace vector.

    :returns: list of two :class:`obspy.core.stream.Stream` s
    """
    import warnings
    import numpy as np
    from eqcorrscan.utils import stacking
    # Run a check to ensure all traces are the same length
    stachans = list(
        set([(tr.stats.station, tr.stats.channel) for st in stream_list
             for tr in st]))
    for stachan in stachans:
        lengths = []
        for st in stream_list:
            lengths.append(
                len(st.select(station=stachan[0], channel=stachan[1])[0]))
        min_length = min(lengths)
        for st in stream_list:
            tr = st.select(station=stachan[0], channel=stachan[1])[0]
            if len(tr.data) > min_length:
                if abs(len(tr.data) - min_length) > (0.1 *
                                                     tr.stats.sampling_rate):
                    raise IndexError('More than 0.1 s length '
                                     'difference, align and fix')
                warnings.warn(
                    str(tr) + ' is not the same length as others, ' +
                    'trimming the end')
                tr.data = tr.data[0:min_length]
    if linear:
        first_subspace = stacking.linstack(stream_list)
    else:
        first_subspace = stacking.PWS_stack(streams=stream_list)
    second_subspace = first_subspace.copy()
    for i in range(len(second_subspace)):
        second_subspace[i].data = np.diff(second_subspace[i].data)
        second_subspace[i].stats.starttime += 0.5 * \
            second_subspace[i].stats.delta
    return [first_subspace, second_subspace]
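
A usage sketch, assuming `templates` is a list of aligned, equal-length obspy Streams with matching station/channel pairs:

first_order, second_order = empirical_SVD(templates, linear=True)
# The derivative detector is one sample shorter than the stack:
assert len(second_order[0].data) == len(first_order[0].data) - 1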
Example #7
def corr_cluster(trace_list, thresh=0.9):
    """
    Group traces based on correlations above threshold with the stack.

    Will run twice, once with a lower threshold to remove large outliers,
    then again with your threshold.

    :type trace_list: list of obspy.Trace
    :param trace_list: Traces to compute similarity between
    :type thresh: float
    :param thresh: Correlation threshold between -1 and 1

    :returns: np.ndarray of bool

    .. note:: We recommend that you align the data before computing the \
        clustering, e.g., the P-arrival on all templates for the same channel \
        should appear at the same time in the trace.  See the \
        stacking.align_traces function for a way to do this
    """
    import warnings
    import numpy as np
    from eqcorrscan.utils import stacking
    from obspy import Stream
    from eqcorrscan.core.match_filter import normxcorr2
    stack = stacking.linstack([Stream(tr) for tr in trace_list])[0]
    output = np.array([False] * len(trace_list))
    group1 = []
    for i, tr in enumerate(trace_list):
        if normxcorr2(tr.data, stack.data)[0][0] > 0.6:
            output[i] = True
            group1.append(tr)
    if not group1:
        warnings.warn('Nothing made it past the first 0.6 threshold')
        return output
    stack = stacking.linstack([Stream(tr) for tr in group1])[0]
    group2 = []
    for i, tr in enumerate(trace_list):
        if normxcorr2(tr.data, stack.data)[0][0] > thresh:
            group2.append(tr)
            output[i] = True
        else:
            output[i] = False
    return output
Example #8
def empirical_SVD(stream_list, linear=True):
    """
    Empirical subspace detector generation function.

    Takes a list of \
    templates and computes the stack as the first order subspace detector, \
    and the differential of this as the second order subspace detector \
    following the empirical subspace method of Barrett & Beroza, 2014 - SRL.

    :type stream_list: list of stream
    :param stream_list: list of template streams to compute the subspace \
        detectors from
    :type linear: bool
    :param linear: Set to true by default to compute the linear stack as the \
        first subspace vector, False will use the phase-weighted stack as the \
        first subspace vector.

    :returns: list of two streams
    """
    import warnings
    import numpy as np
    from eqcorrscan.utils import stacking
    # Run a check to ensure all traces are the same length
    stachans = list(set([(tr.stats.station, tr.stats.channel)
                         for st in stream_list for tr in st]))
    for stachan in stachans:
        lengths = []
        for st in stream_list:
            lengths.append(len(st.select(station=stachan[0],
                                         channel=stachan[1])[0]))
        min_length = min(lengths)
        for st in stream_list:
            tr = st.select(station=stachan[0],
                           channel=stachan[1])[0]
            if len(tr.data) > min_length:
                if abs(len(tr.data) - min_length) > (0.1 *
                                                     tr.stats.sampling_rate):
                    raise IndexError('More than 0.1 s length '
                                     'difference, align and fix')
                warnings.warn(str(tr) + ' is not the same length as others, '
                              'trimming the end')
                tr.data = tr.data[0:min_length]
    if linear:
        first_subspace = stacking.linstack(stream_list)
    else:
        first_subspace = stacking.PWS_stack(streams=stream_list)
    second_subspace = first_subspace.copy()
    for i in range(len(second_subspace)):
        second_subspace[i].data = np.diff(second_subspace[i].data)
        second_subspace[i].stats.starttime += 0.5 * \
            second_subspace[i].stats.delta
    return [first_subspace, second_subspace]
Example #9
def multi_event_singlechan(streams, catalog, clip=10.0, pre_pick=2.0,
                           freqmin=False, freqmax=False, realign=False,
                           cut=(-3.0, 5.0), PWS=False, title=False):
    r"""Function to plot data from a single channel at a single station for \
    multiple events - data will be aligned by their pick-time given in the \
    picks.

    :type streams: list of :class:obspy.stream
    :param streams: List of the streams to use, can contain more traces than \
        you plan on plotting
    :type catalog: obspy.core.event.Catalog
    :param catalog: Catalog of events, one for each trace, with a single pick
    :type clip: float
    :param clip: Length in seconds to plot, defaults to 10.0
    :type pre_pick: float
    :param pre_pick: Length in seconds to extract and plot before the pick, \
        defaults to 2.0
    :type freqmin: float
    :param freqmin: Low cut for bandpass in Hz
    :type freqmax: float
    :param freqmax: High cut for bandpass in Hz
    :type realign: bool
    :param realign: Whether to realign traces based on correlation.
    :type cut: tuple
    :param cut: tuple of start and end times for cut in seconds from the pick
    :type PWS: bool
    :param PWS: compute Phase Weighted Stack, if False, will compute linear \
        stack.
    :type title: str
    :param title: Plot title.

    :returns: Aligned and cut traces, and new picks
    """
    from eqcorrscan.utils import stacking
    import copy
    from eqcorrscan.core.match_filter import normxcorr2
    from obspy import Stream
    import warnings
    fig, axes = plt.subplots(len(catalog)+1, 1, sharex=True, figsize=(7, 12))
    axes = axes.ravel()
    traces = []
    al_traces = []
    # Keep input safe
    clist = copy.deepcopy(catalog)
    st_list = copy.deepcopy(streams)
    for i, event in enumerate(clist):
        if st_list[i].select(station=event.picks[0].waveform_id.station_code,
                             channel='*' +
                             event.picks[0].waveform_id.channel_code[-1]):
            tr = st_list[i].select(station=event.picks[0].waveform_id.
                                   station_code,
                                   channel='*' +
                                   event.picks[0].waveform_id.
                                   channel_code[-1])[0]
        else:
            print('No data for ' + str(event.picks[0].waveform_id))
            continue
        tr.detrend('linear')
        if freqmin:
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        if realign:
            tr_cut = tr.copy()
            tr_cut.trim(event.picks[0].time + cut[0],
                        event.picks[0].time + cut[1],
                        nearest_sample=False)
            if len(tr_cut.data) <= (0.5 * (cut[1] - cut[0]) *
                                    tr_cut.stats.sampling_rate):
                msg = ''.join(['Not enough in the trace for ',
                               tr.stats.station,
                               '.', tr.stats.channel, '\n',
                               'Suggest removing pick from sfile at time ',
                               str(event.picks[0].time)])
                warnings.warn(msg)
            else:
                al_traces.append(tr_cut)
        else:
            tr.trim(event.picks[0].time - pre_pick,
                    event.picks[0].time + clip - pre_pick,
                    nearest_sample=False)
        if len(tr.data) == 0:
            msg = ''.join(['No data in the trace for ', tr.stats.station,
                           '.', tr.stats.channel, '\n',
                           'Suggest removing pick from sfile at time ',
                           str(event.picks[0].time)])
            warnings.warn(msg)
            continue
        traces.append(tr)
    if realign:
        shift_len = int(0.25 * (cut[1] - cut[0]) *
                        al_traces[0].stats.sampling_rate)
        shifts = stacking.align_traces(al_traces, shift_len)
        for i in range(len(shifts)):
            print('Shifting by ' + str(shifts[i]) + ' seconds')
            clist[i].picks[0].time -= shifts[i]
            traces[i].trim(clist[i].picks[0].time - pre_pick,
                           clist[i].picks[0].time + clip - pre_pick,
                           nearest_sample=False)
    # We now have a list of traces
    traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
    traces.sort(key=lambda tup: tup[1])
    traces = [trace[0] for trace in traces]
    # Plot the traces
    for i, tr in enumerate(traces):
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate  # convert to seconds
        axes[i+1].plot(x, y, 'k', linewidth=1.1)
        axes[i+1].yaxis.set_ticks([])
    traces = [Stream(trace) for trace in traces]
    if PWS:
        linstack = stacking.PWS_stack(traces)
    else:
        linstack = stacking.linstack(traces)
    tr = linstack.select(station=clist[0].picks[0].waveform_id.station_code,
                         channel='*' +
                         clist[0].picks[0].waveform_id.channel_code[-1])[0]
    y = tr.data
    x = np.arange(len(y))
    x = x / tr.stats.sampling_rate
    axes[0].plot(x, y, 'r', linewidth=2.0)
    axes[0].set_ylabel('Stack', rotation=0)
    axes[0].yaxis.set_ticks([])
    for i, slave in enumerate(traces):
        cc = normxcorr2(tr.data, slave[0].data)
        axes[i+1].set_ylabel('cc='+str(round(np.max(cc), 2)), rotation=0)
        axes[i+1].text(0.9, 0.15, str(round(np.max(slave[0].data))),
                       bbox=dict(facecolor='white', alpha=0.95),
                       transform=axes[i+1].transAxes)
        axes[i+1].text(0.7, 0.85, slave[0].stats.starttime.datetime.
                       strftime('%Y/%m/%d %H:%M:%S'),
                       bbox=dict(facecolor='white', alpha=0.95),
                       transform=axes[i+1].transAxes)
    axes[-1].set_xlabel('Time (s)')
    if title:
        axes[0].set_title(title)
    plt.subplots_adjust(hspace=0)
    plt.show()
    return traces, clist
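
A hypothetical call, assuming each stream in `streams` holds data for the single pick of the matching event in `catalog` (the parameter values here are illustrative):

traces, new_catalog = multi_event_singlechan(
    streams, catalog, clip=5.0, pre_pick=1.0, freqmin=2.0, freqmax=10.0,
    realign=True, PWS=False, title='Aligned events')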
Example #10
    avamp = np.mean([a[1] for a in amps[-5:]])
    for sta in stations:
        if 'fivesta_template' not in locals():
            fivesta_template = template.select(station=sta)
        else:
            fivesta_template += template.select(station=sta)
    fivesta_templates.append((fivesta_template, avamp))
    del fivesta_template

fivesta_templates.sort(key=lambda tup: tup[1])
template_streams = [t[0] for t in fivesta_templates]
# # Extract the highest 100 amplitude events
# template_streams = [t[0] for t in fivesta_templates[-100:]]

# for template in template_streams:
#     template.write('templates/top_100/' + str(template[0].stats.starttime) +
#                    '.ms', format='MSEED')

groups = clustering.group_delays(template_streams)
ID = 1
from collections import Counter

for group in groups:
    if len(group) > 1:
        print('Group ' + str(ID) + ' of ' + str(len(groups)) + ' has ' +
              str(len(group)) + ' templates')
        # template_stack = stacking.PWS_stack(group, 1.2)
        template_stack = stacking.linstack(group)
        # template_stack.filter('bandpass', freqmin=defaults.lowcut,
        #                       freqmax=defaults.highcut)
        # template_stack.plot(size=(800, 600), equal_scale=False)
        template_stack.write('templates/pan_templates/brightness_group' +
                             str(ID), format='mseed')
    ID += 1
Example #11
def multi_event_singlechan(streams, picks, clip=10.0, pre_pick=2.0,
                           freqmin=False, freqmax=False, realign=False,
                           cut=(-3.0, 5.0), PWS=False, title=False):
    """
    Function to plot data from a single channel at a single station for
    multiple events - data will be aligned by their pick-time given in the
    picks.

    :type streams: List of :class:obspy.stream
    :param streams: List of the streams to use, can contain more traces than\
        you plan on plotting
    :type picks: List of :class:PICK
    :param picks: List of picks, one for each stream
    :type clip: float
    :param clip: Length in seconds to plot, defaults to 10.0
    :type pre_pick: float
    :param pre_pick: Length in seconds to extract and plot before the pick,\
        defaults to 2.0
    :type freqmin: float
    :param freqmin: Low cut for bandpass in Hz
    :type freqmax: float
    :param freqmax: High cut for bandpass in Hz
    :type realign: bool
    :param realign: Whether to realign traces based on correlation.
    :type cut: tuple
    :param cut: tuple of start and end times for cut in seconds from the pick
    :type PWS: bool
    :param PWS: compute Phase Weighted Stack, if False, will compute linear stack
    :type title: str
    :param title: Plot title.

    :returns: Aligned and cut traces, and new picks
    """
    from eqcorrscan.utils import stacking
    import copy
    from eqcorrscan.core.match_filter import normxcorr2
    from obspy import Stream
    fig, axes = plt.subplots(len(picks) + 1, 1, sharex=True, figsize=(7, 12))
    axes = axes.ravel()
    traces = []
    al_traces = []
    # Keep input safe
    plist = copy.deepcopy(picks)
    st_list = copy.deepcopy(streams)
    for i, pick in enumerate(plist):
        if st_list[i].select(station=pick.station,
                             channel='*' + pick.channel[-1]):
            tr = st_list[i].select(station=pick.station,
                                   channel='*' + pick.channel[-1])[0]
        else:
            print('No data for ' + pick.station + '.' + pick.channel)
            continue
        tr.detrend('linear')
        if freqmin:
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        if realign:
            tr_cut = tr.copy()
            tr_cut.trim(pick.time + cut[0], pick.time + cut[1],
                        nearest_sample=False)
            if len(tr_cut.data) <= 0.5 * (cut[1] -
                                          cut[0]) * tr_cut.stats.sampling_rate:
                print('Not enough in the trace for ' + pick.station + '.' +
                      pick.channel)
                print('Suggest removing pick from sfile at time ' +
                      str(pick.time))
            else:
                al_traces.append(tr_cut)
        else:
            tr.trim(pick.time - pre_pick, pick.time + clip - pre_pick,
                    nearest_sample=False)
        if len(tr.data) == 0:
            print('No data in the trace for ' + pick.station + '.' +
                  pick.channel)
            print('Suggest removing pick from sfile at time ' +
                  str(pick.time))
            continue
        traces.append(tr)
    if realign:
        shift_len = int(0.25 * (cut[1] - cut[0]) *
                        al_traces[0].stats.sampling_rate)
        shifts = stacking.align_traces(al_traces, shift_len)
        for i in range(len(shifts)):
            print('Shifting by ' + str(shifts[i]) + ' seconds')
            plist[i].time -= shifts[i]
            traces[i].trim(plist[i].time - pre_pick,
                           plist[i].time + clip - pre_pick,
                           nearest_sample=False)
    # We now have a list of traces
    traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
    traces.sort(key=lambda tup: tup[1])
    traces = [trace[0] for trace in traces]
    # Plot the traces
    for i, tr in enumerate(traces):
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate  # convert to seconds
        axes[i + 1].plot(x, y, 'k', linewidth=1.1)
        # axes[i+1].set_ylabel(tr.stats.starttime.datetime.strftime('%Y/%m/%d %H:%M'),\
        # rotation=0)
        axes[i + 1].yaxis.set_ticks([])
    traces = [Stream(trace) for trace in traces]
    if PWS:
        linstack = stacking.PWS_stack(traces)
    else:
        linstack = stacking.linstack(traces)
    tr = linstack.select(station=picks[0].station,
                         channel='*' + picks[0].channel[-1])[0]
    y = tr.data
    x = np.arange(len(y))
    x = x / tr.stats.sampling_rate
    axes[0].plot(x, y, 'r', linewidth=2.0)
    axes[0].set_ylabel('Stack', rotation=0)
    axes[0].yaxis.set_ticks([])
    for i, slave in enumerate(traces):
        cc = normxcorr2(tr.data, slave[0].data)
        axes[i + 1].set_ylabel('cc=' + str(round(np.max(cc), 2)), rotation=0)
        axes[i + 1].text(0.9, 0.15, str(round(np.max(slave[0].data))),
                         bbox=dict(facecolor='white', alpha=0.95),
                         transform=axes[i + 1].transAxes)
        axes[i + 1].text(0.7, 0.85, slave[0].stats.starttime.datetime.
                         strftime('%Y/%m/%d %H:%M:%S'),
                         bbox=dict(facecolor='white', alpha=0.95),
                         transform=axes[i + 1].transAxes)
    axes[-1].set_xlabel('Time (s)')
    if title:
        axes[0].set_title(title)
    plt.subplots_adjust(hspace=0)
    plt.show()
    return traces, plist
Example #12
    avamp = np.mean([a[1] for a in amps[-5:]])
    for sta in stations:
        if 'fivesta_template' not in locals():
            fivesta_template = template.select(station=sta)
        else:
            fivesta_template += template.select(station=sta)
    fivesta_templates.append((fivesta_template, avamp))
    del fivesta_template

fivesta_templates.sort(key=lambda tup: tup[1])
template_streams = [t[0] for t in fivesta_templates]
# # Extract the highest 100 amplitude events
# template_streams=[t[0] for t in fivesta_templates[-100:]]

# for template in template_streams:
#     template.write('templates/top_100/' + str(template[0].stats.starttime) +
#                    '.ms', format='MSEED')

groups = clustering.group_delays(template_streams)
ID = 1
from collections import Counter

for group in groups:
    if len(group) > 1:
        print('Group ' + str(ID) + ' of ' + str(len(groups)) + ' has ' +
              str(len(group)) + ' templates')
        # template_stack=stacking.PWS_stack(group, 1.2)
        template_stack = stacking.linstack(group)
        # template_stack.filter('bandpass', freqmin=defaults.lowcut, freqmax=defaults.highcut)
        # template_stack.plot(size=(800,600), equal_scale=False)
        template_stack.write('templates/pan_templates/brightness_group' +
                             str(ID),
                             format='mseed')
    ID += 1
Example #13
def multi_event_singlechan(streams,
                           catalog,
                           clip=10.0,
                           pre_pick=2.0,
                           freqmin=False,
                           freqmax=False,
                           realign=False,
                           cut=(-3.0, 5.0),
                           PWS=False,
                           title=False,
                           save=False,
                           savefile=None):
    r"""Function to plot data from a single channel at a single station for \
    multiple events - data will be aligned by their pick-time given in the \
    picks.

    :type streams: list of :class:obspy.stream
    :param streams: List of the streams to use, can contain more traces than \
        you plan on plotting
    :type catalog: obspy.core.event.Catalog
    :param catalog: Catalog of events, one for each trace, with a single pick
    :type clip: float
    :param clip: Length in seconds to plot, defaults to 10.0
    :type pre_pick: float
    :param pre_pick: Length in seconds to extract and plot before the pick, \
        defaults to 2.0
    :type freqmin: float
    :param freqmin: Low cut for bandpass in Hz
    :type freqmax: float
    :param freqmax: High cut for bandpass in Hz
    :type realign: bool
    :param realign: Whether to realign traces based on correlation.
    :type cut: tuple
    :param cut: tuple of start and end times for cut in seconds from the pick
    :type PWS: bool
    :param PWS: compute Phase Weighted Stack, if False, will compute linear \
        stack.
    :type title: str
    :param title: Plot title.
    :type save: bool
    :param save: False will plot to screen, True will save the plot and not \
        show it on screen.
    :type savefile: str
    :param savefile: Filename to save to, required for save=True

    :returns: Aligned and cut traces, and new picks
    """
    _check_save_args(save, savefile)
    from eqcorrscan.utils import stacking
    import copy
    from eqcorrscan.core.match_filter import normxcorr2
    from obspy import Stream
    import warnings
    fig, axes = plt.subplots(len(catalog) + 1, 1, sharex=True, figsize=(7, 12))
    axes = axes.ravel()
    traces = []
    al_traces = []
    # Keep input safe
    clist = copy.deepcopy(catalog)
    st_list = copy.deepcopy(streams)
    for i, event in enumerate(clist):
        if st_list[i].select(station=event.picks[0].waveform_id.station_code,
                             channel='*' +
                             event.picks[0].waveform_id.channel_code[-1]):
            tr = st_list[i].select(
                station=event.picks[0].waveform_id.station_code,
                channel='*' + event.picks[0].waveform_id.channel_code[-1])[0]
        else:
            print('No data for ' + str(event.picks[0].waveform_id))
            continue
        tr.detrend('linear')
        if freqmin:
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        if realign:
            tr_cut = tr.copy()
            tr_cut.trim(event.picks[0].time + cut[0],
                        event.picks[0].time + cut[1],
                        nearest_sample=False)
            if len(tr_cut.data) <= (0.5 * (cut[1] - cut[0]) *
                                    tr_cut.stats.sampling_rate):
                msg = ''.join([
                    'Not enough in the trace for ', tr.stats.station, '.',
                    tr.stats.channel, '\n',
                    'Suggest removing pick from sfile at time ',
                    str(event.picks[0].time)
                ])
                warnings.warn(msg)
            else:
                al_traces.append(tr_cut)
        else:
            tr.trim(event.picks[0].time - pre_pick,
                    event.picks[0].time + clip - pre_pick,
                    nearest_sample=False)
        if len(tr.data) == 0:
            msg = ''.join([
                'No data in the trace for ', tr.stats.station, '.',
                tr.stats.channel, '\n',
                'Suggest removing pick from sfile at time ',
                str(event.picks[0].time)
            ])
            warnings.warn(msg)
            continue
        traces.append(tr)
    if realign:
        shift_len = int(0.25 * (cut[1] - cut[0]) *
                        al_traces[0].stats.sampling_rate)
        shifts = stacking.align_traces(al_traces, shift_len)
        for i in range(len(shifts)):
            print('Shifting by ' + str(shifts[i]) + ' seconds')
            clist[i].picks[0].time -= shifts[i]
            traces[i].trim(clist[i].picks[0].time - pre_pick,
                           clist[i].picks[0].time + clip - pre_pick,
                           nearest_sample=False)
    # We now have a list of traces
    traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
    traces.sort(key=lambda tup: tup[1])
    traces = [trace[0] for trace in traces]
    # Plot the traces
    for i, tr in enumerate(traces):
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate  # convert to seconds
        axes[i + 1].plot(x, y, 'k', linewidth=1.1)
        axes[i + 1].yaxis.set_ticks([])
    traces = [Stream(trace) for trace in traces]
    if PWS:
        linstack = stacking.PWS_stack(traces)
    else:
        linstack = stacking.linstack(traces)
    tr = linstack.select(station=clist[0].picks[0].waveform_id.station_code,
                         channel='*' +
                         clist[0].picks[0].waveform_id.channel_code[-1])[0]
    y = tr.data
    x = np.arange(len(y))
    x = x / tr.stats.sampling_rate
    axes[0].plot(x, y, 'r', linewidth=2.0)
    axes[0].set_ylabel('Stack', rotation=0)
    axes[0].yaxis.set_ticks([])
    for i, slave in enumerate(traces):
        cc = normxcorr2(tr.data, slave[0].data)
        axes[i + 1].set_ylabel('cc=' + str(round(np.max(cc), 2)), rotation=0)
        axes[i + 1].text(0.9,
                         0.15,
                         str(round(np.max(slave[0].data))),
                         bbox=dict(facecolor='white', alpha=0.95),
                         transform=axes[i + 1].transAxes)
        axes[i + 1].text(
            0.7,
            0.85,
            slave[0].stats.starttime.datetime.strftime('%Y/%m/%d %H:%M:%S'),
            bbox=dict(facecolor='white', alpha=0.95),
            transform=axes[i + 1].transAxes)
    axes[-1].set_xlabel('Time (s)')
    if title:
        axes[0].set_title(title)
    plt.subplots_adjust(hspace=0)
    if not save:
        plt.show()
    else:
        plt.savefig(savefile)
    return traces, clist
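
This version adds non-interactive output; a hypothetical call that writes the figure to disk instead of showing it (the filename is an assumption):

traces, new_catalog = multi_event_singlechan(
    streams, catalog, save=True, savefile='single_channel_stack.png')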