Example #1
def party_relative_mags(party, self_files, shift_len, align_len, svd_len,
                        reject, sac_dir, min_amps, calibrate=False,
                        method='PCA'):
    """
    Calculate the relative moments for detections in a Family using
    mag_calc.svd_moments()

    :param party: Party of detections
    :param self_files: List of csv files naming the self-detections
        (one file per family)
    :param shift_len: Maximum shift length used in waveform alignment
    :param align_len: Length of waveform used for correlation in alignment
    :param svd_len: Length of waveform used in relative amplitude calc
    :param reject: Min cc threshold for accepted measurement
    :param sac_dir: Root directory of waveforms
    :param min_amps: Minimum number of relative measurements per pair
    :param calibrate: Flag for calibration to a priori Ml's
    :param method: 'PCA' or 'LSQR'
    :return: Party with calibrated magnitudes added, and list of
        cross-correlation coherences
    """

    # First read-in self detection names
    selfs = []
    for self_file in self_files:
        with open(self_file, 'r') as f:
            rdr = csv.reader(f)
            for row in rdr:
                selfs.append(str(row[0]))
    for fam in party.families:
        print('Starting work on family %s' % fam.template.name)
        if len(fam) == 1:
            print('Only self-detection. Moving on.')
            continue
        temp = fam.template
        prepick = temp.prepick
        events = [det.event for det in fam.detections]
        # Here we'll read in the waveforms and trim from stefan's directory
        # of SAC files so as not to duplicate data
        ev_dirs = ['%s%s' % (sac_dir, str(ev.resource_id).split('/')[-1])
                   for ev in events]
        streams = []
        self_inds = [i for i, ev_dir in enumerate(ev_dirs)
                     if ev_dir.split('/')[-1] in selfs]
        if len(self_inds) == 0:
            print('Family %s has no self detection. Investigate'
                  % fam.template.name)
            continue
        self_ind = self_inds[0]
        # Read in Z components of events which we wrote for stefan
        # Many of these ev_dirs will not exist!
        for i, ev_dir in enumerate(ev_dirs):
            raw_st = Stream()
            print('Reading %s' % ev_dir)
            for wav_file in glob('%s/*Z.sac' % ev_dir):
                print('...file %s' % wav_file)
                raw_tr = read(wav_file)[0]
                start = raw_tr.stats.starttime + raw_tr.stats.sac['a'] - 3.
                end = start + 10
                raw_tr.trim(starttime=start, endtime=end)
                raw_st.traces.append(raw_tr)
            streams.append(raw_st)
        # Move the self detection to the first element
        streams.insert(0, streams.pop(self_ind))
        print('Moved self detection to top of list')
        print('Template Stream: %s' % str(streams[0]))
        if len(streams[0]) == 0:
            print('Template %s waveforms did not get written to SAC.' %
                  temp.name)
            continue
        # Front/back clip hardcoded relative to wavs starting 3 s before pick
        front_clip = 3.0 - shift_len - 0.05 - prepick
        back_clip = front_clip + align_len + (2 * shift_len) + 0.05
        wrk_streams = [] # For aligning
        # Process streams then copy to both ccc_streams and svd_streams
        bad_streams = []
        for i, st in enumerate(list(streams)):
            try:
                shortproc(st=streams[i], lowcut=temp.lowcut,
                          highcut=temp.highcut, filt_order=temp.filt_order,
                          samp_rate=temp.samp_rate)
                wrk_streams.append(st.copy())
            except ValueError as e:
                print('ValueError reads:')
                print(str(e))
                print('Attempting to remove bad trace at {}'.format(
                    str(e).split(' ')[-1]))
                bad_tr = str(e).split(' ')[-1][:-1] # Eliminate trailing "'"
                print('Sta and chan names: {}'.format(bad_tr.split('.')))
                try:
                    tr = streams[i].select(station=bad_tr.split('.')[0],
                                           channel=bad_tr.split('.')[1])[0]
                    streams[i].traces.remove(tr)
                    shortproc(st=streams[i], lowcut=temp.lowcut,
                              highcut=temp.highcut,
                              filt_order=temp.filt_order,
                              samp_rate=temp.samp_rate)
                    wrk_streams.append(st.copy())
                except IndexError as e:
                    print(str(e))
                    print('Funkyness. Removing entire stream')
                    bad_streams.append(st)
        if len(bad_streams) > 0:
            for bst in bad_streams:
                streams.remove(bst)
        svd_streams = copy.deepcopy(streams) # For svd
        ccc_streams = copy.deepcopy(streams)
        # work out cccoh for each event with template
        cccohs = cc_coh_dets(streams=ccc_streams, shift=shift_len,
                             length=svd_len, wav_prepick=3.,
                             corr_prepick=0.05)
        for st in wrk_streams:
            for tr in st:
                tr.trim(starttime=tr.stats.starttime + front_clip,
                        endtime=tr.stats.starttime + back_clip)
        st_chans = list(set([(tr.stats.station, tr.stats.channel)
                             for st in wrk_streams for tr in st]))
        st_chans.sort()
        # Align streams with just P arrivals, then use longer st for svd
        print('Now aligning svd_streams')
        shift_inds = int(shift_len * fam.template.samp_rate)
        for st_chan in st_chans:
            trs = []
            for i, st in enumerate(wrk_streams):
                if len(st.select(station=st_chan[0], channel=st_chan[-1])) > 0:
                    trs.append((i, st.select(station=st_chan[0],
                                             channel=st_chan[-1])[0]))
            inds, traces = zip(*trs)
            shifts, ccs = stacking.align_traces(trace_list=list(traces),
                                                shift_len=shift_inds,
                                                positive=True,
                                                master=traces[0].copy())
            # We now have shifts based on P correlation, shift and trim
            # larger wavs for svd
            for j, shift in enumerate(shifts):
                st = svd_streams[inds[j]]
                if ccs[j] < reject:
                    svd_streams[inds[j]].remove(st.select(
                        station=st_chan[0], channel=st_chan[-1])[0])
                    print('Removing trace due to low cc value: %s' % ccs[j])
                    continue
                strt_tr = st.select(
                    station=st_chan[0], channel=st_chan[-1])[0].stats.starttime
                strt_tr += (3.0 - prepick - shift)
                st.select(station=st_chan[0],
                          channel=st_chan[-1])[0].trim(strt_tr,
                                                       strt_tr + svd_len)
        if method == 'LSQR':
            print('Using least-squares method')
            event_list = []
            for stachan in st_chans:
                st_list = []
                for i, st in enumerate(svd_streams):
                    if len(st.select(station=stachan[0],
                                     channel=stachan[-1])) > 0:
                        st_list.append(i)
                event_list.append(st_list)
            # event_list = np.asarray(event_list).tolist()
            u, sigma, v, sta_chans = svd(stream_list=svd_streams, full=True)
            try:
                M, events_out = svd_moments(u, sigma, v, sta_chans, event_list)
            except IOError as e:
                print('Family %s raised error %s' % (fam.template.name, e))
                continue
        elif method == 'PCA':
            print('Using principal component method')
            # Now loop over all detections and do svd for each matching
            # chan with temp
            events_out = []
            template = svd_streams[0]
            M = []
            for i, st in enumerate(svd_streams):
                if len(st) == 0:
                    print('Event not located, skipping')
                    continue
                ev_r_amps = []
                # For each pair of template:detection (including temp:temp)
                for tr in template:
                    if len(st.select(station=tr.stats.station,
                                     channel=tr.stats.channel)) > 0:
                        det_tr = st.select(station=tr.stats.station,
                                           channel=tr.stats.channel)[0]
                        # Convoluted way of getting two 'vert' vectors
                        data_mat = np.vstack((tr.data, det_tr.data)).T
                        U, sig, Vt = scipy.linalg.svd(data_mat,
                                                      full_matrices=True)
                        # Vt is 2x2 for two events
                        # Per Shelly et al., 2016 eq. 4
                        ev_r_amps.append(Vt[0][1] / Vt[0][0])
                if len(ev_r_amps) < min_amps:
                    print('Fewer than %d amplitude picks, skipping.'
                          % min_amps)
                    continue
                M.append(np.median(ev_r_amps))
                events_out.append(i)
        # If we have a Mag for template, calibrate moments
        if calibrate and len(fam.template.event.magnitudes) > 0:
            # Convert the template magnitude to seismic moment
            temp_mag = fam.template.event.magnitudes[-1].mag
            temp_mo = local_to_moment(temp_mag)
            # Extrapolate from the template moment / relative moment
            # relationship to get the moment for relative moment = 1.0
            norm_mo = temp_mo / M[0]
            # Template is the first event in the list
            # Now these are weights which we can multiply the moments by
            moments = np.multiply(M, norm_mo)
            # Now convert to Mw
            Mw = [2.0 / 3.0 * (np.log10(m) - 9.0) for m in moments]
            Mw2, evs2 = remove_outliers(Mw, events_out)
            # Convert to local
            Ml = [0.88 * m + 0.73 for m in Mw2]
            # Normalize moments to template mag
            # Add calibrated mags to detection events
            for i, eind in enumerate(evs2):
                fam.detections[eind-1].event.magnitudes = [
                    Magnitude(mag=Mw2[i], magnitude_type='Mw')]
                fam.detections[eind-1].event.comments.append(
                    Comment(text=str(cccohs[eind-1])))
                fam.detections[eind-1].event.magnitudes.append(
                    Magnitude(mag=Ml[i], magnitude_type='ML'))
            fam.catalog = Catalog(events=[det.event for det in fam.detections])
    return party, cccohs
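The PCA branch above boils down to a single operation per template:detection pair: stack the two aligned traces as columns of a matrix, take the SVD, and read the relative amplitude off the first right singular vector (Shelly et al., 2016, eq. 4). A minimal self-contained sketch with synthetic data (all names and values here are illustrative, not part of the code above):

import numpy as np
import scipy.linalg

rng = np.random.default_rng(42)
template = np.sin(np.linspace(0, 8 * np.pi, 400))
# Detection: the same waveform at half amplitude, plus a little noise
detection = 0.5 * template + 0.01 * rng.standard_normal(400)

# Two aligned traces as the columns of a 2-column matrix
data_mat = np.vstack((template, detection)).T
U, sig, Vt = scipy.linalg.svd(data_mat, full_matrices=True)
# First right singular vector gives the relative amplitude (~0.5 here)
rel_amp = Vt[0][1] / Vt[0][0]
print(rel_amp)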
Example #2
def multi_event_singlechan(streams, catalog, clip=10.0, pre_pick=2.0,
                           freqmin=False, freqmax=False, realign=False,
                           cut=(-3.0, 5.0), PWS=False, title=False):
    r"""Function to plot data from a single channel at a single station for \
    multiple events - data will be alligned by their pick-time given in the \
    picks.

    :type streams: list of :class:obspy.stream
    :param streams: List of the streams to use, can contain more traces than \
        you plan on plotting
    :type catalog: obspy.core.event.Catalog
    :param catalog: Catalog of events, one for each trace, with a single pick
    :type clip: float
    :param clip: Length in seconds to plot, defaults to 10.0
    :type pre_pick: float
    :param pre_pick: Length in seconds to extract and plot before the pick, \
        defaults to 2.0
    :type freqmin: float
    :param freqmin: Low cut for bandpass in Hz
    :type freqmax: float
    :param freqmax: High cut for bandpass in Hz
    :type realign: bool
    :param realign: Whether to compute the best alignment based on correlation.
    :type cut: tuple
    :param cut: tuple of start and end times for cut in seconds from the pick
    :type PWS: bool
    :param PWS: compute Phase Weighted Stack, if False, will compute linear \
        stack.
    :type title: str
    :param title: Plot title.

    :returns: Aligned and cut traces, and new picks
    """
    from eqcorrscan.utils import stacking
    import copy
    from eqcorrscan.core.match_filter import normxcorr2
    from obspy import Stream
    import warnings
    fig, axes = plt.subplots(len(catalog)+1, 1, sharex=True, figsize=(7, 12))
    axes = axes.ravel()
    traces = []
    al_traces = []
    # Keep input safe
    clist = copy.deepcopy(catalog)
    st_list = copy.deepcopy(streams)
    for i, event in enumerate(clist):
        if st_list[i].select(station=event.picks[0].waveform_id.station_code,
                             channel='*' +
                             event.picks[0].waveform_id.channel_code[-1]):
            tr = st_list[i].select(station=event.picks[0].waveform_id.
                                   station_code,
                                   channel='*' +
                                   event.picks[0].waveform_id.
                                   channel_code[-1])[0]
        else:
            print('No data for ' + str(event.picks[0].waveform_id))
            continue
        tr.detrend('linear')
        if freqmin:
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        if realign:
            tr_cut = tr.copy()
            tr_cut.trim(event.picks[0].time + cut[0],
                        event.picks[0].time + cut[1],
                        nearest_sample=False)
            if len(tr_cut.data) <= (0.5 * (cut[1] - cut[0]) *
                                    tr_cut.stats.sampling_rate):
                msg = ''.join(['Not enough in the trace for ',
                               tr.stats.station,
                               '.', tr.stats.channel, '\n',
                               'Suggest removing pick from sfile at time ',
                               str(event.picks[0].time)])
                warnings.warn(msg)
            else:
                al_traces.append(tr_cut)
        else:
            tr.trim(event.picks[0].time - pre_pick,
                    event.picks[0].time + clip - pre_pick,
                    nearest_sample=False)
        if len(tr.data) == 0:
            msg = ''.join(['No data in the trace for ', tr.stats.station,
                           '.', tr.stats.channel, '\n',
                           'Suggest removing pick from sfile at time ',
                           str(event.picks[0].time)])
            warnings.warn(msg)
            continue
        traces.append(tr)
    if realign:
        shift_len = int(0.25 * (cut[1] - cut[0]) *
                        al_traces[0].stats.sampling_rate)
        shifts = stacking.align_traces(al_traces, shift_len)
        for i in range(len(shifts)):
            print('Shifting by ' + str(shifts[i]) + ' seconds')
            # Assumes one pick per event, with events in clist order
            clist[i].picks[0].time -= shifts[i]
            traces[i].trim(clist[i].picks[0].time - pre_pick,
                           clist[i].picks[0].time + clip - pre_pick,
                           nearest_sample=False)
    # We now have a list of traces
    traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
    traces.sort(key=lambda tup: tup[1])
    traces = [trace[0] for trace in traces]
    # Plot the traces
    for i, tr in enumerate(traces):
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate  # convert to seconds
        axes[i+1].plot(x, y, 'k', linewidth=1.1)
        axes[i+1].yaxis.set_ticks([])
    traces = [Stream(trace) for trace in traces]
    if PWS:
        linstack = stacking.PWS_stack(traces)
    else:
        linstack = stacking.linstack(traces)
    tr = linstack.select(station=clist[0].picks[0].waveform_id.station_code,
                         channel='*' +
                         clist[0].picks[0].waveform_id.channel_code[-1])[0]
    y = tr.data
    x = np.arange(len(y))
    x = x / tr.stats.sampling_rate
    axes[0].plot(x, y, 'r', linewidth=2.0)
    axes[0].set_ylabel('Stack', rotation=0)
    axes[0].yaxis.set_ticks([])
    for i, slave in enumerate(traces):
        cc = normxcorr2(tr.data, slave[0].data)
        axes[i+1].set_ylabel('cc='+str(round(np.max(cc), 2)), rotation=0)
        axes[i+1].text(0.9, 0.15, str(round(np.max(slave[0].data))),
                       bbox=dict(facecolor='white', alpha=0.95),
                       transform=axes[i+1].transAxes)
        axes[i+1].text(0.7, 0.85, slave[0].stats.starttime.datetime.
                       strftime('%Y/%m/%d %H:%M:%S'),
                       bbox=dict(facecolor='white', alpha=0.95),
                       transform=axes[i+1].transAxes)
    axes[-1].set_xlabel('Time (s)')
    if title:
        axes[0].set_title(title)
    plt.subplots_adjust(hspace=0)
    plt.show()
    return traces, clist
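For reference, the PWS flag above switches between a plain mean and a phase-weighted stack. A self-contained numpy/scipy sketch of the two (the idea only, not EQcorrscan's stacking module, which operates on Stream objects):

import numpy as np
from scipy.signal import hilbert


def linear_stack(traces):
    # traces: 2-D array, one row per trace, already aligned
    return np.mean(traces, axis=0)


def phase_weighted_stack(traces, power=2):
    # Instantaneous phase of each trace from its analytic signal
    phases = np.angle(hilbert(traces, axis=1))
    # Phase coherence across traces: ~1 where phases agree, ~0 where not
    coherence = np.abs(np.mean(np.exp(1j * phases), axis=0))
    # Down-weight incoherent samples of the linear stack
    return np.mean(traces, axis=0) * coherence ** power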
Example #3
def party_relative_mags(party, self_files, shift_len, align_len, svd_len,
                        reject, wav_dir, min_amps, m, c, calibrate=False,
                        method='PCA', plot_svd=False):
    """
    Calculate the relative moments for detections in a Family using
    mag_calc.svd_moments()

    :param party: Party of detections
    :param self_files: List of self-detection wav files (in order of families)
    :param shift_len: Maximum shift length used in waveform alignment
    :param align_len: Length of waveform used for correlation in alignment
    :param svd_len: Length of waveform used in relative amplitude calc
    :param reject: Min cc threshold for accepted measurement
    :param wav_dir: Root directory of waveforms
    :param min_amps: Minimum number of relative measurements per pair
    :param m: m in Mw = (m * ML) + c regression between Ml and Mw
    :param c: c in Mw = (m * ML) + c regression between Ml and Mw
    :param calibrate: Flag for calibration to a priori Ml's
    :param method: 'PCA' or 'LSQR'
    :param plot_svd: Bool to plot results of svd relative amplitude calcs
    :return: Party (copy) with calibrated magnitudes added, and list of
        cross-correlation coherences
    """
    pty = party.copy()
    # sort self files and parties by template name
    pty.families.sort(key=lambda x: x.template.name)
    self_files.sort()
    ev_files = glob('{}/*'.format(wav_dir))
    ev_files.sort()
    # splitext (not rstrip) so trailing 'm'/'s' characters are not eaten
    ev_files = {os.path.splitext(os.path.basename(f))[0]: f for f in ev_files}
    for i, fam in enumerate(pty.families):
        temp_wav = read(self_files[i])
        print('Starting work on family %s' % fam.template.name)
        if len(fam) == 0:
            print('No detections. Moving on.')
            continue
        temp = fam.template
        prepick = temp.prepick
        det_ids = [d.id for d in fam]
        # Read in waveforms for detections in family
        streams = [read(ev_files[id]) for id in det_ids]
        # Add template wav as the first element
        streams.insert(0, temp_wav)
        print('Template Stream: %s' % str(streams[0]))
        if len(streams[0]) == 0:
            print('Template %s waveforms did not get written. Investigate.' %
                  temp.name)
            continue
        # Process streams then copy to both ccc_streams and svd_streams
        print('Shortproc-ing streams')
        breakit = False
        for st in streams:
            # rms = [tr for tr in st if tr.stats.sampling_rate < temp.samp_rate]
            # for rm in rms:
            #     st.traces.remove(rm)
            try:
                shortproc(st=st, lowcut=temp.lowcut,
                          highcut=temp.highcut, filt_order=temp.filt_order,
                          samp_rate=temp.samp_rate)
            except ValueError:
                breakit = True
        if breakit:
            print('Something wrong in shortproc. Skip family')
            continue
        # Remove all traces with no picks before copying
        for str_ind, st in enumerate(streams):
            if str_ind == 0:
                event = temp.event
            else:
                event = fam.detections[str_ind-1].event
            rms = []
            for tr in st:
                try:
                    [pk for pk in event.picks
                     if pk.waveform_id.get_seed_string() == tr.id][0]
                except IndexError:
                    rms.append(tr)
            for rm in rms:
                st.traces.remove(rm)
        print('Copying streams')
        wrk_streams = copy.deepcopy(streams)
        svd_streams = copy.deepcopy(streams)  # For svd
        ccc_streams = copy.deepcopy(streams)
        event_list = [temp.event] + [d.event for d in fam.detections]
        try:
            # work out cccoh for each event with template
            cccohs = cc_coh_dets(streams=ccc_streams, events=event_list,
                                 length=svd_len, corr_prepick=prepick,
                                 shift=shift_len)
        except (AssertionError, ValueError) as e:
            # Issue with trimming above?
            print(e)
            continue
        for eind, st in enumerate(wrk_streams):
            if eind == 0:
                event = temp.event
            else:
                event = fam.detections[eind-1].event
            for tr in st:
                pk = [pk for pk in event.picks
                      if pk.waveform_id.get_seed_string() == tr.id][0]
                tr.trim(starttime=pk.time - prepick - shift_len,
                        endtime=pk.time + shift_len + align_len)
        st_seeds = list(set([tr.id for st in wrk_streams for tr in st]))
        st_seeds.sort()
        # Align streams with just P arrivals, then use longer st for svd
        print('Now aligning svd_streams')
        shift_inds = int(shift_len * fam.template.samp_rate)
        for st_seed in st_seeds:
            trs = []
            for i, st in enumerate(wrk_streams):
                if len(st.select(id=st_seed)) > 0:
                    trs.append((i, st.select(id=st_seed)[0]))
            inds, traces = zip(*trs)
            shifts, ccs = stacking.align_traces(trace_list=list(traces),
                                                shift_len=shift_inds,
                                                positive=True,
                                                master=traces[0].copy())
            # We now have shifts based on P correlation, shift and trim
            # larger wavs for svd
            for j, shift in enumerate(shifts):
                st = svd_streams[inds[j]]
                if inds[j] == 0:
                    event = temp.event
                else:
                    event = fam.detections[inds[j]-1].event
                if ccs[j] < reject:
                    svd_streams[inds[j]].remove(st.select(id=st_seed)[0])
                    print('Removing trace due to low cc value: %s' % ccs[j])
                    continue
                pk = [pk for pk in event.picks
                      if pk.waveform_id.get_seed_string() == st_seed][0]
                strt_tr = pk.time - prepick - shift
                st.select(id=st_seed)[0].trim(strt_tr, strt_tr + svd_len)
        if method == 'LSQR':
            print('Using least-squares method')
            event_list = []
            for st_id in st_seeds:
                st_list = []
                for stind, st in enumerate(svd_streams):
                    if len(st.select(id=st_id)) > 0:
                        st_list.append(stind)
                event_list.append(st_list)
            # event_list = np.asarray(event_list).tolist()
            u, sigma, v, sta_chans = svd(stream_list=svd_streams, full=True)
            try:
                M, events_out = svd_moments(u, sigma, v, sta_chans, event_list)
            except IOError as e:
                print('Family %s raised error %s' % (fam.template.name, e))
                return
        elif method == 'PCA':
            print('Using principal component method')
            M, events_out = svd_relative_amps(fam, svd_streams, min_amps,
                                              plot=plot_svd)
            print(M, events_out)
            if len(M) == 0:
                print('No amplitudes calculated, skipping')
                continue
        else:
            print('{} not valid argument for mag calc method'.format(method))
            return
        # If we have a Mag for template, calibrate moments
        if calibrate and len(fam.template.event.magnitudes) > 0:
            print('Converting relative amps to magnitudes')
            # Convert the template magnitude to seismic moment
            temp_mag = fam.template.event.magnitudes[-1].mag
            temp_Mw = ML_to_Mw(temp_mag, m, c)
            temp_mo = Mw_to_M0(temp_Mw)
            # Extrapolate from the template moment / relative moment
            # relationship to get the moment for relative moment = 1.0
            norm_mo = temp_mo / M[0]
            # Template is the first event in the list
            # Now these are weights which we can multiply the moments by
            moments = np.multiply(M, norm_mo)
            # Now convert to Mw
            Mw = [Mw_to_M0(mo, inverse=True) for mo in moments]
            # Convert to local
            Ml = [ML_to_Mw(mm, m, c, inverse=True) for mm in Mw]
            # Normalize moments to template mag
            # Add calibrated mags to detection events
            for jabba, eind in enumerate(events_out):
                # Skip template waveform
                if eind == 0:
                    continue
                fam.detections[eind].event.magnitudes = [
                    Magnitude(mag=Mw[jabba], magnitude_type='Mw')]
                fam.detections[eind].event.comments.append(
                    Comment(text=str(cccohs[eind])))
                fam.detections[eind].event.magnitudes.append(
                    Magnitude(mag=Ml[jabba], magnitude_type='ML'))
                fam.detections[eind].event.preferred_magnitude_id = (
                    fam.detections[eind].event.magnitudes[-1].resource_id.id)
    return pty, cccohs
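Example #3 leans on two conversion helpers that are not shown: ML_to_Mw (the Mw = m * ML + c regression from the docstring) and Mw_to_M0. Plausible sketches, assuming the same Mw = 2/3 * (log10(M0) - 9.0) relation used in Examples #1 and #8; the real helpers may differ:

import numpy as np


def ML_to_Mw(mag, m, c, inverse=False):
    # Forward: Mw = m * ML + c; inverse recovers ML from Mw
    if inverse:
        return (mag - c) / m
    return m * mag + c


def Mw_to_M0(mag, inverse=False):
    # Forward: moment (N m) from Mw; inverse: Mw from a moment
    if inverse:
        return 2.0 / 3.0 * (np.log10(mag) - 9.0)
    return 10.0 ** (1.5 * mag + 9.0)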
Example #4

import os
from obspy import read, Stream
from eqcorrscan.utils.stacking import align_traces


# NOTE: this example arrived as a fragment; its enclosing function definition
# was lost. The signature and the two list initialisations below are inferred
# from the variables the body uses - they are assumptions, not original code.
def build_group_stream(group, dfref, wf_dir, sta):
    trace_list = []
    node_list = []
    for node in group.nodes:
        t1 = dfref.loc[dfref.window_id == node].tlb.values[0] - 4
        t2 = dfref.loc[dfref.window_id == node].tub.values[0] + 4
        #print(f"{t1}, {t2}, {t2-t1}")
        tmp = read(os.path.join(wf_dir, t1.strftime("%Y/%j"), "*%s*Z*" % sta),
                   starttime=t1,
                   endtime=t2)[0]
        tmp.detrend()
        tmp.filter("highpass", freq=2)
        tmp.filter("lowpass", freq=10)
        #tmp.plot()
        trace_list.append(tmp)
        node_list.append(node)
    shifts, ccs = align_traces(trace_list,
                               shift_len=int(4. * 100),
                               master=False,
                               positive=False,
                               plot=False)

    stream = Stream()
    for node, shift in zip(node_list, shifts):
        t1 = dfref.loc[dfref.window_id == node].tlb.values[0] - shift
        t2 = t1 + 6
        tmp = read(os.path.join(wf_dir, t1.strftime("%Y/%j"), "*%s*" % sta),
                   starttime=t1,
                   endtime=t2)
        tmp.detrend()
        tmp.filter("highpass", freq=2)
        #        tmp.filter("lowpass", freq=10)
        #tmp.plot()
        stream += tmp
    # Assumed return; the fragment ends here in the source
    return stream
Example #5

def multi_event_singlechan(streams, picks, clip=10.0, pre_pick=2.0,
                           freqmin=False, freqmax=False, realign=False,
                           cut=(-3.0, 5.0), PWS=False, title=False):
    """
    Function to plot data from a single channel at a single station for
    multiple events - data will be aligned by their pick-time given in the
    picks

    :type streams: List of :class:obspy.stream
    :param streams: List of the streams to use, can contain more traces than\
        you plan on plotting
    :type picks: List of :class:PICK
    :param picks: List of picks, one for each stream
    :type clip: float
    :param clip: Length in seconds to plot, defaults to 10.0
    :type pre_pick: float
    :param pre_pick: Length in seconds to extract and plot before the pick,\
        defaults to 2.0
    :type freqmin: float
    :param freqmin: Low cut for bandpass in Hz
    :type freqmax: float
    :param freqmax: High cut for bandpass in Hz
    :type realign: bool
    :param realign: Whether to compute the best alignment based on correlation.
    :type cut: tuple
    :param cut: tuple of start and end times for cut in seconds from the pick
    :type PWS: bool
    :param PWS: compute Phase Weighted Stack, if False, will compute linear
        stack
    :type title: str
    :param title: Plot title.

    :returns: Aligned and cut traces, and new picks
    """
    from eqcorrscan.utils import stacking
    import copy
    from eqcorrscan.core.match_filter import normxcorr2
    from obspy import Stream
    fig, axes = plt.subplots(len(picks) + 1, 1, sharex=True, figsize=(7, 12))
    axes = axes.ravel()
    traces = []
    al_traces = []
    # Keep input safe
    plist = copy.deepcopy(picks)
    st_list = copy.deepcopy(streams)
    for i, pick in enumerate(plist):
        if st_list[i].select(station=pick.station,
                             channel='*' + pick.channel[-1]):
            tr = st_list[i].select(station=pick.station,
                                   channel='*' + pick.channel[-1])[0]
        else:
            print('No data for ' + pick.station + '.' + pick.channel)
            continue
        tr.detrend('linear')
        if freqmin:
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        if realign:
            tr_cut = tr.copy()
            tr_cut.trim(pick.time + cut[0], pick.time + cut[1],
                        nearest_sample=False)
            if len(tr_cut.data) <= 0.5 * (cut[1] -
                                          cut[0]) * tr_cut.stats.sampling_rate:
                print('Not enough in the trace for ' + pick.station + '.' +
                      pick.channel)
                print('Suggest removing pick from sfile at time ' +
                      str(pick.time))
            else:
                al_traces.append(tr_cut)
        else:
            tr.trim(pick.time - pre_pick, pick.time + clip - pre_pick,
                    nearest_sample=False)
        if len(tr.data) == 0:
            print('No data in the trace for ' + pick.station + '.' +
                  pick.channel)
            print('Suggest removing pick from sfile at time ' +
                  str(pick.time))
            continue
        traces.append(tr)
    if realign:
        shift_len = int(0.25 * (cut[1] - cut[0]) *
                        al_traces[0].stats.sampling_rate)
        shifts = stacking.align_traces(al_traces, shift_len)
        for i in range(len(shifts)):
            print('Shifting by ' + str(shifts[i]) + ' seconds')
            # Assumes one pick per stream, with picks in plist order
            plist[i].time -= shifts[i]
            traces[i].trim(plist[i].time - pre_pick,
                           plist[i].time + clip - pre_pick,
                           nearest_sample=False)
    # We now have a list of traces
    traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
    traces.sort(key=lambda tup: tup[1])
    traces = [trace[0] for trace in traces]
    # Plot the traces
    for i, tr in enumerate(traces):
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate  # convert to seconds
        axes[i + 1].plot(x, y, 'k', linewidth=1.1)
        # axes[i+1].set_ylabel(tr.stats.starttime.datetime.strftime('%Y/%m/%d %H:%M'),\
        # rotation=0)
        axes[i + 1].yaxis.set_ticks([])
    traces = [Stream(trace) for trace in traces]
    if PWS:
        linstack = stacking.PWS_stack(traces)
    else:
        linstack = stacking.linstack(traces)
    tr = linstack.select(station=picks[0].station,
                         channel='*' + picks[0].channel[-1])[0]
    y = tr.data
    x = np.arange(len(y))
    x = x / tr.stats.sampling_rate
    axes[0].plot(x, y, 'r', linewidth=2.0)
    axes[0].set_ylabel('Stack', rotation=0)
    axes[0].yaxis.set_ticks([])
    for i, slave in enumerate(traces):
        cc = normxcorr2(tr.data, slave[0].data)
        axes[i + 1].set_ylabel('cc=' + str(round(np.max(cc), 2)), rotation=0)
        axes[i+1].text(0.9, 0.15, str(round(np.max(slave[0].data))),
                       bbox=dict(facecolor='white', alpha=0.95),
                       transform=axes[i+1].transAxes)
        axes[i+1].text(0.7, 0.85, slave[0].stats.starttime.datetime.
                       strftime('%Y/%m/%d %H:%M:%S'),
                       bbox=dict(facecolor='white', alpha=0.95),
                       transform=axes[i+1].transAxes)
    axes[-1].set_xlabel('Time (s)')
    if title:
        axes[0].set_title(title)
    plt.subplots_adjust(hspace=0)
    plt.show()
    return traces, plist
Example #6
def align_design(design_set,
                 shift_len,
                 reject,
                 multiplex,
                 no_missed=True,
                 plot=False):
    """
    Align individual traces within streams of the design set.

    Perform before Detector.construct to align traces before computing the \
    singular value decomposition.

    :type design_set: list
    :param design_set: List of obspy.core.stream.Stream to be aligned
    :type shift_len: float
    :param shift_len: Maximum shift (plus/minus) in seconds.
    :type reject: float
    :param reject: Minimum correlation for traces; traces below this are \
        zero-padded, or their stream removed, depending on multiplex and \
        no_missed.
    :type multiplex: bool
    :param multiplex: If you are going to multiplex the data, then there has \
        to be data for all channels, so we will pad with zeros, otherwise \
        there is no need.
    :type no_missed: bool
    :param no_missed: Reject streams with missed traces, defaults to True. \
        A missing trace from lots of events will reduce the quality of the \
        subspace detector if multiplexed.  Only used when multiplex is True.
    :type plot: bool
    :param plot: Whether to plot the aligned traces as we go or not.

    :rtype: list
    :return: List of obspy.core.stream.Stream of aligned streams

    .. Note:: Assumes only one trace for each channel for each stream in the \
        design_set. If more are present will only use the first one.

    .. Note:: Will cut all traces to be the same length as required for the \
        svd, this length will be the shortest trace length - 2 * shift_len
    """
    trace_lengths = [
        tr.stats.endtime - tr.stats.starttime for st in design_set for tr in st
    ]
    clip_len = min(trace_lengths) - (2 * shift_len)
    stachans = list(
        set([(tr.stats.station, tr.stats.channel) for st in design_set
             for tr in st]))
    remove_set = []
    for stachan in stachans:
        trace_list = []
        trace_ids = []
        for i, st in enumerate(design_set):
            tr = st.select(station=stachan[0], channel=stachan[1])
            if len(tr) > 0:
                trace_list.append(tr[0])
                trace_ids.append(i)
            if len(tr) > 1:
                warnings.warn('Too many matches for %s %s' %
                              (stachan[0], stachan[1]))
        shift_len_samples = int(shift_len * trace_list[0].stats.sampling_rate)
        shifts, cccs = stacking.align_traces(trace_list=trace_list,
                                             shift_len=shift_len_samples,
                                             positive=True)
        for i, shift in enumerate(shifts):
            st = design_set[trace_ids[i]]
            start_t = st.select(station=stachan[0],
                                channel=stachan[1])[0].stats.starttime
            start_t += shift_len
            start_t -= shift
            st.select(station=stachan[0],
                      channel=stachan[1])[0].trim(start_t, start_t + clip_len)
            if cccs[i] < reject:
                if multiplex and not no_missed:
                    st.select(
                        station=stachan[0], channel=stachan[1]
                    )[0].data = np.zeros(
                        int(clip_len *
                            (st.select(station=stachan[0], channel=stachan[1])
                             [0].stats.sampling_rate) + 1))
                    warnings.warn('Padding stream with zero trace for ' +
                                  'station ' + stachan[0] + '.' + stachan[1])
                    print('zero padding')
                elif multiplex and no_missed:
                    remove_set.append(st)
                    warnings.warn('Will remove stream due to low-correlation')
                    continue
                else:
                    st.remove(
                        st.select(station=stachan[0], channel=stachan[1])[0])
                    print('Removed channel with correlation %s' % cccs[i])
                    continue
    if no_missed:
        for st in remove_set:
            if st in design_set:
                design_set.remove(st)
    if plot:
        for stachan in stachans:
            trace_list = []
            for st in design_set:
                tr = st.select(station=stachan[0], channel=stachan[1])
                if len(tr) > 0:
                    trace_list.append(tr[0])
            if len(trace_list) > 1:
                plotting.multi_trace_plot(traces=trace_list,
                                          corr=True,
                                          stack=None,
                                          title='.'.join(stachan))
            else:
                print('No plot for you, only one trace left after rejection')
    return design_set
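A hedged usage sketch for align_design: build a design set from event waveforms, align it, then construct the subspace detector the docstring refers to. The file pattern and parameter values are placeholders:

from glob import glob
from obspy import read

design_set = [read(f) for f in sorted(glob('design_events/*.ms'))]
aligned = align_design(design_set, shift_len=0.5, reject=0.7,
                       multiplex=True, no_missed=True, plot=False)
# The aligned streams are now trimmed to a common length, ready for
# e.g. eqcorrscan.core.subspace.Detector().construct(...)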
Example #7
def stack_plot(tribe,
               wav_dir_pat,
               station,
               channel,
               title,
               shift=True,
               shift_len=0.3,
               savefig=None):
    """
    Plot list of traces for a stachan one just above the other

    :param tribe: Tribe to plot
    :param wav_dir_pat: Glob pattern for all possible wavs
    :param station: Station to plot
    :param channel: channel to plot
    :param title: Plot title
    :param shift: Whether to allow alignment of the wavs
    :param shift_len: Length in seconds to allow wav to shift
    :param savefig: Name of the file to write
    :return: None; the figure is shown, or saved if savefig is given
    """
    wavs = glob(wav_dir_pat)
    streams = []
    events = [temp.event for temp in tribe]
    for temp in tribe:
        streams.append(
            read([
                f for f in wavs if f.split('/')[-1].split('.')[0] == str(
                    temp.event.resource_id).split('/')[-1]
            ][0]))
    # Sort traces by starttime
    streams.sort(key=lambda x: x[0].stats.starttime)
    # Select all traces
    traces = []
    tr_evs = []
    for st, ev in zip(streams, events):
        if len(st.select(station=station, channel=channel)) == 1:
            tr = st.select(station=station, channel=channel)[0]
            tr.trim(starttime=tr.stats.starttime + 1.5,
                    endtime=tr.stats.endtime - 5)
            traces.append(tr)
            tr_evs.append(ev)
    if shift:  # align traces on cc
        shift_samp = int(shift_len * traces[0].stats.sampling_rate)
        pks = [
            pk.time for ev in tr_evs for pk in ev.picks
            if pk.waveform_id.station_code == station
            and pk.waveform_id.channel_code == channel
        ]
        cut_traces = [
            tr.slice(starttime=p_time - 0.2, endtime=p_time + 0.4)
            for tr, p_time in zip(traces, pks)
        ]
        shifts, ccs = align_traces(cut_traces, shift_len=shift_samp)
        dt_vects = []
        for shif, tr in zip(shifts, traces):
            arb_dt = UTCDateTime(1970, 1, 1)
            td = datetime.timedelta(
                microseconds=int(1 / tr.stats.sampling_rate * 1000000))
            # Make new arbitrary time vectors as they otherwise occur on
            # different dates
            dt_vects.append([(arb_dt + shif).datetime + (i * td)
                             for i in range(len(tr.data))])
    # Normalize traces and make dates vect
    date_labels = []
    for tr in traces:
        date_labels.append(str(tr.stats.starttime.date))
        tr.data = tr.data / max(tr.data)
    fig, ax = plt.subplots(figsize=(6, 15))
    vert_steps = np.linspace(0, len(traces), len(traces))
    if shift:
        # Plotting chronologically from top
        for tr, vert_step, dt_v in zip(list(reversed(traces)), vert_steps,
                                       list(reversed(dt_vects))):
            ax.plot(dt_v, tr.data + vert_step, color='k')
    else:
        for tr, vert_step in zip(list(reversed(traces)), vert_steps):
            ax.plot(tr.data + vert_step, color='k')
    if shift:
        ax.set_xlabel('Seconds', fontsize=19)
    else:
        ax.set_xlabel('Samples', fontsize=19)
    ax.set_ylabel('Date', fontsize=19)
    # Change y labels to dates
    ax.yaxis.set_ticks(vert_steps)
    ax.set_yticklabels(date_labels[::-1], fontsize=16)
    ax.set_title(title, fontsize=19)
    if savefig:
        fig.tight_layout()
        plt.savefig(savefig)
        plt.close()
    else:
        fig.tight_layout()
        plt.show()
    return
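A possible invocation of stack_plot, assuming a tribe saved to disk; the file names, station, and channel are placeholders:

from eqcorrscan.core.match_filter import Tribe

tribe = Tribe().read('my_tribe.tgz')
stack_plot(tribe, wav_dir_pat='templates/*.ms', station='STA1',
           channel='EHZ', title='STA1.EHZ aligned templates',
           shift=True, shift_len=0.3, savefig='sta1_stack.png')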
Example #8
def family_calc(template, detections, wavdir, cut=(-0.5, 3.0),
                freqmin=5.0, freqmax=15.0, corr_thresh=0.9,
                template_pre_pick=0.1, samp_rate=100.0, plotvar=False,
                resample=True):
    """
    Function to calculate the magnitudes for a given family, where the template
    is an s-file with a magnitude (and an appropriate waveform in the same
    directory), and the detections is a list of s-files for that template.

    :type template: str
    :param template: path to the template for this family
    :type detections: List of str
    :param detections: List of paths for s-files detected for this family
    :type wavdir: str
    :param wavdir: Path to the detection waveforms
    :type cut: tuple of float
    :param cut: Cut window around P-pick
    :type freqmin: float
    :param freqmin: Low-cut in Hz
    :type freqmax: float
    :param freqmax: High-cut in Hz
    :type corr_thresh: float
    :param corr_thresh: Minimum correlation (with stack) for use in SVD
    :type template_pre_pick: float
    :param template_pre_pick: Pre-pick used for template in seconds
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz

    :returns: np.ndarray of relative magnitudes
    """
    from obspy import read, Stream
    from eqcorrscan.utils import stacking, clustering
    from eqcorrscan.core.match_filter import normxcorr2
    import numpy as np
    from obspy.signal.cross_correlation import xcorr

    # First read in the template and check that is has a magnitude
    template_mag = Sfile_util.readheader(template).Mag_1
    template_magtype = Sfile_util.readheader(template).Mag_1_type
    if template_mag == 'nan' or template_magtype != 'L':
        raise IOError('Template does not have a local magnitude, '
                      'calculate this')

    # Now we need to load all the waveforms and picks
    all_detection_streams = []  # Empty list for all the streams
    all_p_picks = []  # List for all the P-picks
    event_headers = []  # List of event headers which we will return
    for detection in detections:
        event_headers.append(Sfile_util.readheader(detection))
        d_picks = Sfile_util.readpicks(detection)
        try:
            d_stream = read(wavdir + '/' +
                            Sfile_util.readwavename(detection)[0])
        except IOError:
            # Allow for seisan year/month directories
            d_stream = read(wavdir + '/????/??/' +
                            Sfile_util.readwavename(detection)[0])
        except Exception:
            raise IOError('Cannot read waveform')
        # Resample the stream
        if resample:
            d_stream = d_stream.detrend('linear')
            d_stream = d_stream.resample(samp_rate)
        # We only want channels with a p-pick, these should be vertical channels
        picked = []
        p_picks = []
        for pick in d_picks:
            pick.time -= template_pre_pick
            print(pick.time)
            if pick.phase[-1] == 'P':
                p_picks.append(pick)
                tr = d_stream.select(station=pick.station,
                                     channel='??' + pick.channel[-1])
                print(tr)
                if len(tr) >= 1:
                    tr = tr[0]
                else:
                    print('No channel for pick')
                    print(pick)
                    break
                # Filter the trace
                tr = tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
                # Trim the trace around the P-pick
                tr.trim(pick.time + cut[0] - 0.05, pick.time + cut[1] + 0.5)
                picked.append(tr)
        picked = Stream(picked)
        # Add this to the list of streams
        all_detection_streams.append(picked)
        all_p_picks.append(p_picks)
    # Add the template in
    template_stream = read('/'.join(template.split('/')[0:-1]) + '/' +
                           Sfile_util.readwavename(template)[0])
    # Resample
    if resample:
        template_stream = template_stream.detrend('linear')
        template_stream = template_stream.resample(samp_rate)
    template_picks = Sfile_util.readpicks(template)
    picked = []
    p_picks = []
    for pick in template_picks:
        pick.time -= template_pre_pick
        if pick.phase == 'P':
            p_picks.append(pick)
            tr = template_stream.select(station=pick.station,
                                        channel='??' + pick.channel[-1])
            if len(tr) >= 1:
                tr = tr[0]
                # Filter the trace
                tr = tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
                # Trim the trace around the P-pick
                tr.trim(pick.time + cut[0] - 0.05, pick.time + cut[1] + 0.5)
                picked.append(tr)
            else:
                print('No channel for pick')
                print(pick)
    all_detection_streams.append(Stream(picked))
    print('I have read in ' + str(len(all_detection_streams)) +
          ' streams of data')
    all_p_picks.append(p_picks)
    # We now have a list of bandpassed, trimmed streams for all P-picked channels
    # Lets align them
    stachans = [tr.stats.station + '.' + tr.stats.channel
                for st in all_detection_streams for tr in st]
    stachans = list(set(stachans))
    for i in range(len(stachans)):
        chan_traces = []
        chan_pick_indexes = []  # Need this for next crop
        for j, detection_stream in enumerate(all_detection_streams):
            stachan = stachans[i]
            # If there is a pick/data for this channel then add it to the list
            detection_trace = detection_stream.select(
                station=stachan.split('.')[0],
                channel=stachan.split('.')[1])
            if len(detection_trace) == 1:
                chan_traces.append(detection_trace[0])
                chan_pick_indexes.append(j)
            elif len(detection_trace) > 1:
                print('More than one trace for ' + stachan)
                chan_traces.append(detection_trace[0])
                chan_pick_indexes.append(j)
        # shiftlen = int(0.4 * (cut[1] - cut[0]) *
        #                chan_traces[0].stats.sampling_rate)
        # shiftlen = 400
        # shiftlen = 200
        shiftlen = 10
        shifts, ccs = stacking.align_traces(chan_traces, shiftlen,
                                            master=chan_traces[-1])
        # Shift by up to 0.5s
        # Amend the picks using the shifts
        for j in range(len(shifts)):
            shift = shifts[j]
            pick_index = chan_pick_indexes[j]  # Which stream to look at
            for pick in all_p_picks[pick_index]:
                # Could also require matching channel:
                # pick.channel == '*' + stachan.split('.')[1][-1]
                if pick.station == stachan.split('.')[0]:
                    pick.time -= shift
                    print('Shifting ' + pick.station + ' by ' + str(shift) +
                          ' for correlation at ' + str(ccs[j]))
    # We now have amended picks, now we need to re-trim to complete the alignment
    for i in range(len(all_detection_streams)):
        for j in range(len(all_detection_streams[i])):
            all_detection_streams[i][j].trim(all_p_picks[i][j].time + cut[0],
                                             all_p_picks[i][j].time + cut[1],
                                             pad=True, fill_value=0,
                                             nearest_sample=True)
    # A second, small-scale alignment pass against a linear stack
    # (same loop as above with master=stacking.linstack(...)) could be done
    # here to refine the picks now that the stack is better, but it is
    # disabled.


    #--------------------------------------------------------------------------
    # Now we have completely aligned traces:
    # We need to remove poorly correlated traces before we compute the SVD
    # We also want to record which stachans have channels for which events
    stachan_event_list = []
    for stachan in stachans:
        chan_traces = []
        event_list = []
        final_event_list = []  # Final indexes of events for this stachan
        for i in range(len(all_detection_streams)):
            # Extract channel
            st = all_detection_streams[i]
            tr = st.select(station=stachan.split('.')[0],
                           channel=stachan.split('.')[1])
            if not len(tr) == 0:
                chan_traces.append(tr[0])
                event_list.append(i)
        # enforce fixed length
        for tr in chan_traces:
            tr.data = tr.data[0:int(tr.stats.sampling_rate *
                                    (cut[1] - cut[0]))]
        # Compute the stack and compare to this
        chan_traces = [Stream(tr) for tr in chan_traces]
        # stack = stacking.linstack(chan_traces)
        stack = chan_traces[-1]
        chan_traces = [st[0] for st in chan_traces]
        if plotvar:
            fig, axes = plt.subplots(len(chan_traces) + 1, 1, sharex=True,
                                     figsize=(7, 12))
            axes = axes.ravel()
            axes[0].plot(stack[0].data, 'r', linewidth=1.5)
            axes[0].set_title(chan_traces[0].stats.station + '.' +
                              chan_traces[0].stats.channel)
            axes[0].set_ylabel('Stack')
        for i, tr in enumerate(chan_traces):
            if plotvar:
                axes[i + 1].plot(tr.data, 'k', linewidth=1.5)
            # corr = normxcorr2(tr.data.astype(np.float32),
            #                   stack[0].data.astype(np.float32))
            dummy, corr = xcorr(tr.data.astype(np.float32),
                                stack[0].data.astype(np.float32), 1)
            corr = np.array(corr).reshape(1, 1)
            if plotvar:
                axes[i + 1].set_ylabel(str(round(corr[0][0], 2)))
            if corr[0][0] < corr_thresh:
                # Remove the channel
                print(str(corr) + ' for channel ' + tr.stats.station + '.' +
                      tr.stats.channel + ' event ' + str(i))
                all_detection_streams[event_list[i]].remove(tr)
            else:
                final_event_list.append(event_list[i])
        if plotvar:
            plt.show()
        # We should require at least three detections per channel used
        # Compute the SVD
        if len(final_event_list) >= 3:
            stachan_event_list.append((stachan, final_event_list))
        else:
            for i in range(len(all_detection_streams)):
                tr = all_detection_streams[i].select(
                    station=stachan.split('.')[0])
                if not len(tr) == 0:
                    all_detection_streams[i].remove(tr[0])
    # Remove empty streams
    filled_streams = []
    for stream in all_detection_streams:
        if not len(stream) == 0:
            filled_streams.append(stream)
    all_detection_streams = filled_streams
    # Now we have the streams that are highly enough correlated and the list of
    # which events these correspond to
    print(len(all_detection_streams))
    print(stachan_event_list)
    if len(all_detection_streams) > 0 and len(all_detection_streams[0]) > 0:
        V, s, U, out_stachans = clustering.SVD(all_detection_streams)
        # Reorder the event list
        event_list = []
        event_stachans = []
        for out_stachan in out_stachans:
            for stachan in stachan_event_list:
                if stachan[0] == out_stachan:
                    event_list.append(stachan[1])
                    event_stachans.append(stachan[0])
                    print(len(stachan[1]))
        print(event_list)
        relative_moments, event_list = SVD_moments(U, s, V, event_stachans,
                                                   event_list)
        print('\n\nRelative moments: ')
        print(relative_moments)
        for stachan in stachan_event_list:
            print(stachan)
        # Now we have the relative moments for all appropriate events - this
        # should include the template event, which has a manually determined
        # magnitude.
        # Check that we have got the template event
        if not event_list[-1] == len(detections):
            print('Template not included in relative magnitude, fail')
            print('Largest event in event_list: ' + str(event_list[-1]))
            print('You gave me ' + str(len(detections)) + ' detections')
            return False
        # Convert the template magnitude to seismic moment
        template_moment = local_to_moment(template_mag)
        # Extrapolate from the template moment / relative moment relationship
        # to get the moment for relative moment = 1.0
        norm_moment = template_moment / relative_moments[-1]
        # Template is the last event in the list
        # Now these are weights which we can multiply the moments by
        moments = relative_moments * norm_moment
        print('Moments: ')
        print(moments)
        # Now convert to Mw
        Mw = [2.0 / 3.0 * (np.log10(M) - 9.0) for M in moments]
        print('Moment magnitudes: ')
        print(Mw)
        # Convert to local
        Ml = [0.88 * M + 0.73 for M in Mw]
        print('Local magnitudes: ')
        print(Ml)
        print('Template magnitude: ')
        print(template_mag)
        i = 0
        for event_id in event_list[0:-1]:
            print(event_id)
            print(Ml[i])
            event_headers[event_id].Mag_2 = Ml[i]
            event_headers[event_id].Mag_2_type = 'S'
            i += 1
        return event_headers
    else:
        print('No useful channels')
        print(all_detection_streams)
        return False
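Examples #1 and #8 both call local_to_moment without showing it. A sketch consistent with the conversions they apply afterwards, i.e. inverting Ml = 0.88 * Mw + 0.73 and then Mw = 2/3 * (log10(M0) - 9.0); a real helper may use a region-specific relation instead:

import numpy as np


def local_to_moment(mag):
    # Invert Ml = 0.88 * Mw + 0.73 to get Mw...
    Mw = (mag - 0.73) / 0.88
    # ...then invert Mw = 2/3 * (log10(M0) - 9.0) to get moment in N m
    return 10.0 ** (1.5 * Mw + 9.0)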
Example #9
def multi_event_singlechan(streams,
                           catalog,
                           clip=10.0,
                           pre_pick=2.0,
                           freqmin=False,
                           freqmax=False,
                           realign=False,
                           cut=(-3.0, 5.0),
                           PWS=False,
                           title=False,
                           save=False,
                           savefile=None):
    r"""Function to plot data from a single channel at a single station for \
    multiple events - data will be alligned by their pick-time given in the \
    picks.

    :type streams: list of :class:obspy.stream
    :param streams: List of the streams to use, can contain more traces than \
        you plan on plotting
    :type catalog: obspy.core.event.Catalog
    :param catalog: Catalog of events, one for each trace, with a single pick
    :type clip: float
    :param clip: Length in seconds to plot, defaults to 10.0
    :type pre_pick: float
    :param pre_pick: Length in seconds to extract and plot before the pick, \
        defaults to 2.0
    :type freqmin: float
    :param freqmin: Low cut for bandpass in Hz
    :type freqmax: float
    :param freqmax: High cut for bandpass in Hz
    :type realign: bool
    :param realign: Whether to compute the best alignment based on correlation.
    :type cut: tuple
    :param cut: tuple of start and end times for cut in seconds from the pick
    :type PWS: bool
    :param PWS: compute Phase Weighted Stack, if False, will compute linear \
        stack.
    :type title: str
    :param title: Plot title.
    :type save: bool
    :param save: False will plot to screen, true will save plot and not show \
        to screen.
    :type savefile: str
    :param savefile: Filename to save to, required for save=True

    :returns: Aligned and cut traces, and new picks
    """
    _check_save_args(save, savefile)
    from eqcorrscan.utils import stacking
    import copy
    from eqcorrscan.core.match_filter import normxcorr2
    from obspy import Stream
    import warnings
    fig, axes = plt.subplots(len(catalog) + 1, 1, sharex=True, figsize=(7, 12))
    axes = axes.ravel()
    traces = []
    al_traces = []
    # Keep input safe
    clist = copy.deepcopy(catalog)
    st_list = copy.deepcopy(streams)
    for i, event in enumerate(clist):
        if st_list[i].select(station=event.picks[0].waveform_id.station_code,
                             channel='*' +
                             event.picks[0].waveform_id.channel_code[-1]):
            tr = st_list[i].select(
                station=event.picks[0].waveform_id.station_code,
                channel='*' + event.picks[0].waveform_id.channel_code[-1])[0]
        else:
            print('No data for ' + str(event.picks[0].waveform_id))
            continue
        tr.detrend('linear')
        if freqmin:
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        if realign:
            tr_cut = tr.copy()
            tr_cut.trim(event.picks[0].time + cut[0],
                        event.picks[0].time + cut[1],
                        nearest_sample=False)
            if len(tr_cut.data) <= (0.5 * (cut[1] - cut[0]) *
                                    tr_cut.stats.sampling_rate):
                msg = ''.join([
                    'Not enough in the trace for ', tr.stats.station, '.',
                    tr.stats.channel, '\n',
                    'Suggest removing pick from sfile at time ',
                    str(event.picks[0].time)
                ])
                warnings.warn(msg)
            else:
                al_traces.append(tr_cut)
        else:
            tr.trim(event.picks[0].time - pre_pick,
                    event.picks[0].time + clip - pre_pick,
                    nearest_sample=False)
        if len(tr.data) == 0:
            msg = ''.join([
                'No data in the trace for ', tr.stats.station, '.',
                tr.stats.channel, '\n',
                'Suggest removing pick from sfile at time ',
                str(event.picks[0].time)
            ])
            warnings.warn(msg)
            continue
        traces.append(tr)
    if realign:
        shift_len = int(0.25 * (cut[1] - cut[0]) *
                        al_traces[0].stats.sampling_rate)
        shifts = stacking.align_traces(al_traces, shift_len)
        for i in range(len(shifts)):
            print('Shifting by ' + str(shifts[i]) + ' seconds')
            # Assumes one pick per event, with events in clist order
            clist[i].picks[0].time -= shifts[i]
            traces[i].trim(clist[i].picks[0].time - pre_pick,
                           clist[i].picks[0].time + clip - pre_pick,
                           nearest_sample=False)
    # We now have a list of traces
    traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
    traces.sort(key=lambda tup: tup[1])
    traces = [trace[0] for trace in traces]
    # Plot the traces
    for i, tr in enumerate(traces):
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate  # convert to seconds
        axes[i + 1].plot(x, y, 'k', linewidth=1.1)
        axes[i + 1].yaxis.set_ticks([])
    traces = [Stream(trace) for trace in traces]
    if PWS:
        linstack = stacking.PWS_stack(traces)
    else:
        linstack = stacking.linstack(traces)
    tr = linstack.select(station=clist[0].picks[0].waveform_id.station_code,
                         channel='*' +
                         clist[0].picks[0].waveform_id.channel_code[-1])[0]
    y = tr.data
    x = np.arange(len(y))
    x = x / tr.stats.sampling_rate
    axes[0].plot(x, y, 'r', linewidth=2.0)
    axes[0].set_ylabel('Stack', rotation=0)
    axes[0].yaxis.set_ticks([])
    for i, slave in enumerate(traces):
        cc = normxcorr2(tr.data, slave[0].data)
        axes[i + 1].set_ylabel('cc=' + str(round(np.max(cc), 2)), rotation=0)
        axes[i + 1].text(0.9,
                         0.15,
                         str(round(np.max(slave[0].data))),
                         bbox=dict(facecolor='white', alpha=0.95),
                         transform=axes[i + 1].transAxes)
        axes[i + 1].text(
            0.7,
            0.85,
            slave[0].stats.starttime.datetime.strftime('%Y/%m/%d %H:%M:%S'),
            bbox=dict(facecolor='white', alpha=0.95),
            transform=axes[i + 1].transAxes)
    axes[-1].set_xlabel('Time (s)')
    if title:
        axes[0].set_title(title)
    plt.subplots_adjust(hspace=0)
    if not save:
        plt.show()
    else:
        plt.savefig(savefile)
    return traces, clist
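A minimal hedged call of this catalog-based variant; the file names are placeholders and each event is assumed to carry exactly one pick matching its stream:

from glob import glob
from obspy import read, read_events

catalog = read_events('detections.xml')
streams = [read(f) for f in sorted(glob('det_wavs/*.ms'))]  # one per event
traces, new_catalog = multi_event_singlechan(
    streams, catalog, clip=10.0, pre_pick=2.0, freqmin=2.0, freqmax=10.0,
    realign=True, PWS=False, title='Aligned detections', save=True,
    savefile='detections.png')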