Example #1
    def test_xcorr_pick_correction(self):
        """
        Test cross correlation pick correction on a set of two small local
        earthquakes.
        """
        st1 = read(os.path.join(self.path,
                                'BW.UH1._.EHZ.D.2010.147.a.slist.gz'))
        st2 = read(os.path.join(self.path,
                                'BW.UH1._.EHZ.D.2010.147.b.slist.gz'))

        tr1 = st1.select(component="Z")[0]
        tr2 = st2.select(component="Z")[0]
        tr1_copy = tr1.copy()
        tr2_copy = tr2.copy()
        t1 = UTCDateTime("2010-05-27T16:24:33.315000Z")
        t2 = UTCDateTime("2010-05-27T16:27:30.585000Z")

        dt, coeff = xcorr_pick_correction(t1, tr1, t2, tr2, 0.05, 0.2, 0.1)
        self.assertAlmostEqual(dt, -0.014459080288833711)
        self.assertAlmostEqual(coeff, 0.91542878457939791)
        dt, coeff = xcorr_pick_correction(t2, tr2, t1, tr1, 0.05, 0.2, 0.1)
        self.assertAlmostEqual(dt, 0.014459080288833711)
        self.assertAlmostEqual(coeff, 0.91542878457939791)
        dt, coeff = xcorr_pick_correction(
            t1, tr1, t2, tr2, 0.05, 0.2, 0.1, filter="bandpass",
            filter_options={'freqmin': 1, 'freqmax': 10})
        self.assertAlmostEqual(dt, -0.013025086360067755)
        self.assertAlmostEqual(coeff, 0.98279277273758803)
        self.assertEqual(tr1, tr1_copy)
        self.assertEqual(tr2, tr2_copy)
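The three positional numbers in these calls are easy to misread; written out with ObsPy's keyword names, the first call above is equivalent to:

dt, coeff = xcorr_pick_correction(
    t1, tr1, t2, tr2,
    t_before=0.05,  # correlation window starts 0.05 s before each pick
    t_after=0.2,    # correlation window ends 0.2 s after each pick
    cc_maxlag=0.1)  # search for the correlation maximum within +/- 0.1 s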
Example #2
    def test_xcorr_pick_correction_images(self):
        """
        Test cross correlation pick correction on a set of two small local
        earthquakes.
        """
        st1 = read(
            os.path.join(self.path, 'BW.UH1._.EHZ.D.2010.147.a.slist.gz'))
        st2 = read(
            os.path.join(self.path, 'BW.UH1._.EHZ.D.2010.147.b.slist.gz'))

        tr1 = st1.select(component="Z")[0]
        tr2 = st2.select(component="Z")[0]
        t1 = UTCDateTime("2010-05-27T16:24:33.315000Z")
        t2 = UTCDateTime("2010-05-27T16:27:30.585000Z")

        with ImageComparison(self.path_images, 'xcorr_pick_corr.png') as ic:
            dt, coeff = xcorr_pick_correction(t1,
                                              tr1,
                                              t2,
                                              tr2,
                                              0.05,
                                              0.2,
                                              0.1,
                                              plot=True,
                                              filename=ic.name)
Example #3
import numpy as np
from obspy.signal.cross_correlation import xcorr_pick_correction
from obspy.signal.util import next_pow_2


def deconvolve(specratio,
               method='multitaper',
               winlength=5.0,
               freqmin=0.5,
               freqmax=5.0,
               trim=None):
    # trim down the traces
    mdata = specratio.mastertr
    edata = specratio.egftr
    pick_large = mdata.stats.starttime + 2.
    pick_small = edata.stats.starttime + 2.
    if trim:
        mdata.trim(starttime=pick_large + trim[0],
                   endtime=pick_large + trim[1])
        edata.trim(starttime=pick_small + trim[0],
                   endtime=pick_small + trim[1])
    dt, coeff = xcorr_pick_correction(pick_large,
                                      mdata,
                                      pick_small,
                                      edata,
                                      t_before=0.25,
                                      t_after=1.0,
                                      cc_maxlag=1.5,
                                      filter="bandpass",
                                      filter_options={
                                          'freqmin': freqmin,
                                          'freqmax': freqmax
                                      })
    pick_small = pick_small + dt  # realign the traces by cross-correlation
    ts1 = mdata.copy()
    ts1.trim(pick_large, pick_large + winlength)
    N = len(ts1)
    nfft = next_pow_2(N)
    ts2 = edata.copy()
    ts2.trim(pick_small, pick_small + winlength)
    if method == 'multitaper':
        # specrat_gen is a project-local helper (multitaper spectral ratio)
        freqs, specs, mspecs, especs, deconvolved = specrat_gen(
            ts1, ts2, nfft, 4)
    elif method == 'traditional':
        # deconvf is a project-local frequency-domain deconvolution helper
        deconvolved = deconvf(ts2, ts1, ts1.stats.sampling_rate)
    else:
        raise ValueError("method must be 'multitaper' or 'traditional'")
    M = np.arange(0, len(deconvolved))
    N = len(M)
    SeD = np.where(np.logical_and(M >= 0, M < N / 2))
    d1 = deconvolved[SeD]
    SeD2 = np.where(np.logical_and(M > N / 2, M <= N + 1))
    d2 = deconvolved[SeD2]
    stf = np.concatenate((d2, d1))
    stf /= stf.max()
    return stf, coeff
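deconvolve() only needs an object exposing mastertr and egftr Trace attributes; a minimal, hypothetical driver (file names are placeholders, and the project-local helpers specrat_gen/deconvf must be importable) could look like:

from collections import namedtuple

from obspy import read

SpecRatio = namedtuple('SpecRatio', ['mastertr', 'egftr'])

master = read('master_event.mseed')[0]  # placeholder waveform files
egf = read('egf_event.mseed')[0]
stf, coeff = deconvolve(SpecRatio(master, egf), method='traditional',
                        trim=(-1.0, 6.0))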
Example #4
import warnings

from obspy.signal.cross_correlation import xcorr_pick_correction


def subsample_xcorr_shift(d, s):
    """
    Calculate the correlation time shift around the maximum amplitude of the
    synthetic trace with subsample accuracy.

    :param d: observed data trace
    :param s: synthetic trace
    """
    # Estimate shift and use it as a guideline for the subsample accuracy
    # shift.
    time_shift = _xcorr_shift(d.data, s.data) * d.stats.delta

    # Align on the maximum amplitude of the synthetics.
    pick_time = s.stats.starttime + s.data.argmax() * s.stats.delta

    # Will raise a warning if the trace ids don't match which we don't care
    # about here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return xcorr_pick_correction(pick_time, s, pick_time, d,
                                     20.0 * time_shift, 20.0 * time_shift,
                                     10.0 * time_shift)[0]
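_xcorr_shift is a private helper of the surrounding project and is not shown here; a minimal stand-in with the behaviour the caller assumes (the integer-sample lag of the cross-correlation maximum) might be:

import numpy as np


def _xcorr_shift(d, s):
    # lag, in samples, at which s best aligns with d
    cc = np.correlate(d, s, mode='full')
    return int(np.argmax(cc)) - (len(s) - 1)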
Example #5
# Fragment of a loop over SAC traces; the enclosing loop and the choice of
# the first trace as master are reconstructed here as an assumption.
for k, tr in enumerate(st):
    if k == 0:
        tmaster = tr
        t0_utc = tr.stats.starttime + tr.stats.sac.t5
        t5_mas = tr.stats.sac.t5
        nsamples = 0  # doesn't shift master
        # print('tmaster: ', t0_utc, ' t5: ', t5_mas)
    else:
        tp_utc = tr.stats.starttime + tr.stats.sac.t5
        # print('k: ', k, ' t: ', tp_utc, ' t5: ', tr.stats.sac.t5)
        try:
            dt_shift, coeff = xcorr_pick_correction(
                t0_utc, tmaster, tp_utc, tr, 0.1, 1.6, 1.7, plot=False,
                filter="bandpass",
                filter_options={'freqmin': 1, 'freqmax': 10})
        except Exception:
            dt_shift = float("NaN")
        if np.isnan(dt_shift):
            dt_shift = 0
        tr.stats.sac.t5 = round(tr.stats.sac.t5 + dt_shift, 2)
        nsamples = int(
            np.round((t5_mas - tr.stats.sac.t5) *
                     tmaster.stats.sampling_rate))
        # print('nsamples: ', dt_shift * tmaster.stats.sampling_rate,
        #       ' dt_shift: ', dt_shift, ' tp_utc: ', tp_utc)
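The excerpt ends after computing nsamples; how the shift is applied lies outside the fragment. One plausible continuation (an assumption, not taken from the source) is an integer-sample roll into alignment with the master:

tr.data = np.roll(tr.data, nsamples)  # hypothetical alignment step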
Example #6
from os.path import join

import obspy
from obspy import UTCDateTime
from obspy.signal.cross_correlation import xcorr_pick_correction

# Point dir1 at the local data directory; the original author's path is
# kept below for reference.
# dir1 = '/Users/nunn/Google Drive/for_Katja/PDART2'
dir1 = '/path/to/PDART2'

st1 = obspy.read(join(dir1, '1973/XA/S12/MHZ/XA.S12..MHZ.1973.006.gz'))
st2 = obspy.read(join(dir1, '1973/XA/S12/MHZ/XA.S12..MHZ.1973.060.gz'))

print(st1)
print(st2)

t1 = UTCDateTime("1973-01-06T05:39:12.209122Z")
t2 = UTCDateTime("1973-03-01T07:18:03.829105Z")

# I'm using large windows in case I want to change the parameters
st1.merge()
st1.trim(starttime=t1 - 1200, endtime=t1 + 3600)
st2.merge()
st2.trim(starttime=t2 - 1200, endtime=t2 + 3600)

tr1 = st1.select(component="Z")[0]
tr2 = st2.select(component="Z")[0]

tr1.filter("bandpass", freqmin=0.3, freqmax=0.5, corners=3)
tr2.filter("bandpass", freqmin=0.3, freqmax=0.5, corners=3)

print(tr1)
print(tr2)

dt, coeff = xcorr_pick_correction(t1, tr1, t2, tr2, -10, 600, 10, plot=True)

print("  Time correction for pick 2: %.6f" % dt)
print("  Correlation coefficient: %.2f" % coeff)
Example #7
def refine_picks(catalog, stream_dict, pre_pick, post_pick, shift_len,
                 cc_thresh, master=None, lowcut=1.0, highcut=20.0,
                 plotvar=False):
    r"""Function to refine picks in a catalog based upon either a pre-chosen\
    master event or the event in the catalog with the highest amplitude.

    :type catalog: class: obspy.Catalog
    :param catalog: Catalog of events which we want to adjust picks for
    :type stream_dict: dict
    :param stream_dict: Dictionary with key:value pairing of event\
        ResourceID:obspy.Stream for each event in catalog.
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type post_pick: float
    :param post_pick: Time after the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type master: bool or str
    :param master: If None, master event defaults to the event with the\
        highest SNR. Otherwise, must specify a valid event resource_id\
        from the catalog.
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=20.0

    :returns: class: obspy.Catalog
    """

    import obspy
    if int(obspy.__version__.split('.')[0]) > 0:
        from obspy.signal.cross_correlation import xcorr_pick_correction
    else:
        from obspy.signal.cross_correlation import xcorrPickCorrection \
            as xcorr_pick_correction

    # Establish master template if not specified
    if master:
        master_id = obspy.ResourceIdentifier(master)
    else:
        # Find event with highest SNR to be master
        avg_snr = {}
        for event in catalog:
            avg_snr[event.resource_id] =\
                sum([x.snr for x in event.amplitudes]) / len(event.amplitudes)
        master_id = max(avg_snr, key=avg_snr.get)
    # Loop back through catalog and extract master event (there better way?)
    master_event = [x for x in catalog if x.resource_id == master_id][0]
    master_stream = stream_dict[master_id]

    new_catalog = obspy.Catalog()
    # Figure total number of picks
    tot_pks = sum(len(event.picks) for event in catalog)
    refined_num = 0
    # Now loop the master through all events in catalog
    for slave_event in catalog:
        # Copy old slave event and reset the picks (keep the rest of the info)
        # new_event = obspy.core.event.Event()
        new_event = slave_event.copy()
        new_event.picks = []
        slave_stream = stream_dict[slave_event.resource_id]
        # Find UNcommon picks between slave and master
        mismatches = uncommon_picks(slave_event, master_event)
        # Append them to new event (otherwise they get missed)
        for uncom_pick in mismatches:
            new_event.picks.append(uncom_pick)
        for pick in master_event.picks:
            # Find station, phase pairs
            # Added by Carolin
            slave_matches = [p for p in slave_event.picks
                             if p.phase_hint == pick.phase_hint
                             and p.waveform_id.station_code ==
                             pick.waveform_id.station_code]
            if master_stream.select(station=pick.waveform_id.station_code,
                                    channel='*' +
                                    pick.waveform_id.channel_code[-1]):
                mastertr = master_stream.\
                    select(station=pick.waveform_id.station_code,
                           channel='*' +
                           pick.waveform_id.channel_code[-1])[0]
            else:
                print('No waveform data for ' +
                      pick.waveform_id.station_code + '.' +
                      pick.waveform_id.channel_code)
                break
            for slave_pick in slave_matches:
                if slave_stream.select(station=slave_pick.waveform_id.
                                       station_code,
                                       channel='*'+slave_pick.waveform_id.
                                       channel_code[-1]):
                    slavetr = slave_stream.\
                        select(station=slave_pick.waveform_id.station_code,
                               channel='*'+slave_pick.waveform_id.
                               channel_code[-1])[0]
                else:
                    print('No slave data for ' +
                          slave_pick.waveform_id.station_code + '.' +
                          slave_pick.waveform_id.channel_code)
                    break
                try:
                    correction, cc =\
                        xcorr_pick_correction(pick.time, mastertr,
                                              slave_pick.time,
                                              slavetr, pre_pick, post_pick,
                                              shift_len, filter="bandpass",
                                              filter_options={'freqmin':
                                                              lowcut,
                                                              'freqmax':
                                                              highcut},
                                              plot=plotvar)
                    if abs(correction) > shift_len:
                        warnings.warn('Shift correction too large, ' +
                                      'will not use')
                        new_event.picks.append(slave_pick)
                        continue
                    if cc > cc_thresh:
                        print('Threshold exceeded')
                        new_pick_time = slave_pick.time + correction
                        new_pick = slave_pick.copy()
                        new_pick.time = new_pick_time
                        new_pick.creation_info.agency_id = 'VUW'
                        new_pick.creation_info.author = 'eqcorrscan.refine_picks()'
                        new_pick.creation_info.creation_time = obspy.UTCDateTime.now()
                        new_event.picks.append(new_pick)
                        refined_num += 1
                    else:
                        # new_event.picks.append(slave_pick)
                        print('Correlation not good enough to correct pick')
                        new_event.picks.append(slave_pick)
                except Exception:
                    msg = "Couldn't compute correlation correction"
                    warnings.warn(msg)
                    new_event.picks.append(slave_pick)
                    continue
        new_catalog += new_event
    print('Refined %d of %d picks' % (refined_num, tot_pks))
    return new_catalog
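A hypothetical driver, assuming cat is an obspy Catalog and the waveform files are named per event (all paths are placeholders):

from obspy import read, read_events

cat = read_events('catalog.xml')  # placeholder catalog file
stream_dict = {ev.resource_id: read('waveforms/event_%d.ms' % i)
               for i, ev in enumerate(cat)}
refined_cat = refine_picks(cat, stream_dict, pre_pick=0.05, post_pick=0.5,
                           shift_len=0.2, cc_thresh=0.7)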
Example #8
def write_correlations(event_list,
                       wavbase,
                       extract_len,
                       pre_pick,
                       shift_len,
                       lowcut=1.0,
                       highcut=10.0,
                       max_sep=8,
                       min_link=8,
                       cc_thresh=0.0,
                       plotvar=False,
                       debug=0):
    """
    Write a dt.cc file for hypoDD input for a given list of events.

    Takes an input list of events and computes pick refinements by correlation.
    Outputs two files, dt.cc and dt.cc2: dt.cc weights each link by the
    cross-correlation coefficient, while dt.cc2 weights by the square of the
    cross-correlation coefficient.

    :type event_list: list
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: str
    :param wavbase: Path to the seisan wave directory that the wavefiles in the
                    S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type cc_thresh: float
    :param cc_thresh: Threshold to include cross-correlation results.
    :type plotvar: bool
    :param plotvar: To show the pick-correction plots, defaults to False.
    :type debug: int
    :param debug: Variable debug levels from 0-5, higher=more output.

    .. warning:: This is not a fast routine!

    .. warning::
        In contrast to seisan's corr routine, but in accordance with the
        hypoDD manual, this outputs corrected differential time.

    .. note::
        Currently we have not implemented a method for taking
        unassociated event objects and wavefiles.  As such if you have events \
        with associated wavefiles you are advised to generate Sfiles for each \
        event using the sfile_util module prior to this step.

    .. note::
        There is no provision to taper waveforms within these functions, if you
        desire this functionality, you should apply the taper before calling
        this.  Note the :func:`obspy.Trace.taper` functions.
    """
    warnings.filterwarnings(action="ignore",
                            message="Maximum of cross correlation " +
                            "lower than 0.8: *")
    corr_list = []
    f = open('dt.cc', 'w')
    f2 = open('dt.cc2', 'w')
    k_events = len(list(event_list))
    for i, master in enumerate(event_list):
        master_sfile = master[1]
        if debug > 1:
            print('Computing correlations for master: %s' % master_sfile)
        master_event_id = master[0]
        master_event = read_nordic(master_sfile)[0]
        master_picks = master_event.picks
        master_ori_time = master_event.origins[0].time
        master_location = (master_event.origins[0].latitude,
                           master_event.origins[0].longitude,
                           master_event.origins[0].depth / 1000.0)
        master_wavefiles = readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + master_wavefiles[0])
        if masterpath:
            masterstream = read(masterpath[0])
        if len(master_wavefiles) > 1:
            for wavefile in master_wavefiles:
                try:
                    masterstream += read(os.path.join(wavbase, wavefile))
                except IOError:
                    raise IOError("Couldn't find wavefile")
        for j in range(i + 1, k_events):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            if debug > 2:
                print('Comparing to event: %s' % slave_sfile)
            slave_event_id = event_list[j][0]
            slave_wavefiles = readwavename(slave_sfile)
            try:
                slavestream = read(wavbase + os.sep + slave_wavefiles[0])
            except Exception:
                raise IOError('No wavefile found: ' + slave_wavefiles[0] +
                              ' ' + slave_sfile)
            if len(slave_wavefiles) > 1:
                for wavefile in slave_wavefiles:
                    try:
                        slavestream += read(wavbase + os.sep + wavefile)
                    except IOError:
                        print('No waveform found: %s' %
                              (wavbase + os.sep + wavefile))
                        continue
            # Write out the header line
            event_text = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0   \n'
            event_text2 = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0   \n'
            slave_event = read_nordic(slave_sfile)[0]
            slave_picks = slave_event.picks
            slave_ori_time = slave_event.origins[0].time
            slave_location = (slave_event.origins[0].latitude,
                              slave_event.origins[0].longitude,
                              slave_event.origins[0].depth / 1000.0)
            if dist_calc(master_location, slave_location) > max_sep:
                if debug > 0:
                    print('Separation exceeds max_sep: %s' %
                          (dist_calc(master_location, slave_location)))
                continue
            links = 0
            phases = 0
            for pick in master_picks:
                if not hasattr(pick, 'phase_hint') or \
                                len(pick.phase_hint) == 0:
                    warnings.warn('No phase-hint for pick:')
                    print(pick)
                    continue
                if pick.phase_hint[0].upper() not in ['P', 'S']:
                    warnings.warn('Will only use P or S phase picks')
                    print(pick)
                    continue
                    # Only use P and S picks, not amplitude or 'other'
                # Find station, phase pairs
                # Added by Carolin
                slave_matches = [
                    p for p in slave_picks if hasattr(p, 'phase_hint')
                    and p.phase_hint == pick.phase_hint and
                    p.waveform_id.station_code == pick.waveform_id.station_code
                ]

                if masterstream.select(station=pick.waveform_id.station_code,
                                       channel='*' +
                                       pick.waveform_id.channel_code[-1]):
                    mastertr = masterstream.\
                        select(station=pick.waveform_id.station_code,
                               channel='*' +
                               pick.waveform_id.channel_code[-1])[0]
                elif debug > 1:
                    print('No waveform data for ' +
                          pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code)
                    print(pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code + ' ' + slave_sfile +
                          ' ' + master_sfile)
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(
                            station=slave_pick.waveform_id.station_code,
                            channel='*' +
                            slave_pick.waveform_id.channel_code[-1]):
                        slavetr = slavestream.\
                            select(station=slave_pick.waveform_id.station_code,
                                   channel='*' + slave_pick.waveform_id.
                                   channel_code[-1])[0]
                    else:
                        print('No slave data for ' +
                              slave_pick.waveform_id.station_code + '.' +
                              slave_pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code + ' ' +
                              slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc =\
                            xcorr_pick_correction(
                                pick.time, mastertr, slave_pick.time,
                                slavetr, pre_pick, extract_len - pre_pick,
                                shift_len, filter="bandpass",
                                filter_options={'freqmin': lowcut,
                                                'freqmax': highcut},
                                plot=plotvar)
                        # Get the differential travel time using the
                        # corrected time.
                        # Check that the correction is within the allowed shift
                        # This can occur in the obspy routine when the
                        # correlation function is increasing at the end of the
                        # window.
                        if abs(correction) > shift_len:
                            warnings.warn('Shift correction too large, ' +
                                          'will not use')
                            continue
                        correction = (pick.time - master_ori_time) -\
                            (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc >= cc_thresh:
                            weight = cc
                            phases += 1
                            # added by Caro
                            event_text += pick.waveform_id.station_code.\
                                ljust(5) + _cc_round(correction, 3).\
                                rjust(11) + _cc_round(weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            event_text2 += pick.waveform_id.station_code\
                                .ljust(5) + _cc_round(correction, 3).\
                                rjust(11) +\
                                _cc_round(weight * weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            if debug > 3:
                                print(event_text)
                        else:
                            print('cc too low: %s' % cc)
                        corr_list.append(cc * cc)
                    except Exception:
                        msg = "Couldn't compute correlation correction"
                        warnings.warn(msg)
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
                f2.write(event_text2)
    if plotvar:
        plt.hist(corr_list, 150)
        plt.show()
    # f.write('\n')
    f.close()
    f2.close()
    return
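A hypothetical call, assuming numbered events with Nordic S-files on disk (all paths are placeholders):

event_list = [(1, 'sfiles/01-0123-45L.S201001'),
              (2, 'sfiles/02-0234-56L.S201001')]
write_correlations(event_list, wavbase='waveforms', extract_len=2.0,
                   pre_pick=0.2, shift_len=0.5, lowcut=2.0, highcut=8.0)
# dt.cc and dt.cc2 are written to the working directory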
Example #9
def write_correlations(event_list, wavbase, extract_len, pre_pick, shift_len,
                       lowcut=1.0, highcut=10.0, max_sep=4, min_link=8,
                       coh_thresh=0.0, coherence_weight=True, plotvar=False):
    """
    Function to write a dt.cc file for hypoDD input - takes an input list of
    events and computes pick refinements by correlation.

    :type event_list: list of tuple
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: str
    :param wavbase: Path to the seisan wave directory that the wavefiles in the
                    S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type coherence_weight: bool
    :param coherence_weight: Use coherence (the squared cross-correlation)\
        to weight the dt.cc file rather than the raw cross-correlation\
        value; defaults to True, which uses the coherence.
    :type plotvar: bool
    :param plotvar: To show the pick-correction plots, defaults to False.

    .. warning:: This is not a fast routine!

    .. warning:: In contrast to seisan's \
        corr routine, but in accordance with the hypoDD manual, this outputs \
        corrected differential time.

    .. note:: Currently we have not implemented a method for taking \
        unassociated event objects and wavefiles.  As such if you have events \
        with associated wavefiles you are advised to generate Sfiles for each \
        event using the sfile_util module prior to this step.
    """
    import obspy
    if int(obspy.__version__.split('.')[0]) > 0:
        from obspy.signal.cross_correlation import xcorr_pick_correction
    else:
        from obspy.signal.cross_correlation import xcorrPickCorrection \
            as xcorr_pick_correction
    import matplotlib.pyplot as plt
    from obspy import read
    from eqcorrscan.utils.mag_calc import dist_calc
    import glob
    import warnings

    corr_list = []
    f = open('dt.cc', 'w')
    f2 = open('dt.cc2', 'w')
    for i, master in enumerate(event_list):
        master_sfile = master[1]
        master_event_id = master[0]
        master_picks = sfile_util.readpicks(master_sfile).picks
        master_event = sfile_util.readheader(master_sfile)
        master_ori_time = master_event.origins[0].time
        master_location = (master_event.origins[0].latitude,
                           master_event.origins[0].longitude,
                           master_event.origins[0].depth)
        master_wavefiles = sfile_util.readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + master_wavefiles[0])
        if masterpath:
            masterstream = read(masterpath[0])
        if len(master_wavefiles) > 1:
            for wavefile in master_wavefiles:
                try:
                    masterstream += read(os.path.join(wavbase, wavefile))
                except IOError:
                    warnings.warn("Couldn't find wavefile " + wavefile)
                    continue
        for j in range(i+1, len(event_list)):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            slave_event_id = event_list[j][0]
            slave_wavefiles = sfile_util.readwavename(slave_sfile)
            try:
                # slavestream=read(wavbase+'/*/*/'+slave_wavefiles[0])
                slavestream = read(wavbase + os.sep + slave_wavefiles[0])
            except Exception:
                # print(slavestream)
                raise IOError('No wavefile found: '+slave_wavefiles[0]+' ' +
                              slave_sfile)
            if len(slave_wavefiles) > 1:
                for wavefile in slave_wavefiles:
                    # slavestream+=read(wavbase+'/*/*/'+wavefile)
                    try:
                        slavestream += read(wavbase+'/'+wavefile)
                    except Exception:
                        continue
            # Write out the header line
            event_text = '#'+str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10)+' 0.0   \n'
            event_text2 = '#'+str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10)+' 0.0   \n'
            slave_picks = sfile_util.readpicks(slave_sfile).picks
            slave_event = sfile_util.readheader(slave_sfile)
            slave_ori_time = slave_event.origins[0].time
            slave_location = (slave_event.origins[0].latitude,
                              slave_event.origins[0].longitude,
                              slave_event.origins[0].depth)
            if dist_calc(master_location, slave_location) > max_sep:
                continue
            links = 0
            phases = 0
            for pick in master_picks:
                if pick.phase_hint[0].upper() not in ['P', 'S']:
                    continue
                    # Only use P and S picks, not amplitude or 'other'
                # Find station, phase pairs
                # Added by Carolin
                slave_matches = [p for p in slave_picks
                                 if p.phase_hint == pick.phase_hint
                                 and p.waveform_id.station_code ==
                                 pick.waveform_id.station_code]

                if masterstream.select(station=pick.waveform_id.station_code,
                                       channel='*' +
                                       pick.waveform_id.channel_code[-1]):
                    mastertr = masterstream.\
                        select(station=pick.waveform_id.station_code,
                               channel='*' +
                               pick.waveform_id.channel_code[-1])[0]
                else:
                    print('No waveform data for ' +
                          pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code)
                    print(pick.waveform_id.station_code +
                          '.' + pick.waveform_id.channel_code +
                          ' ' + slave_sfile+' ' + master_sfile)
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(station=slave_pick.waveform_id.
                                          station_code,
                                          channel='*'+slave_pick.waveform_id.
                                          channel_code[-1]):
                        slavetr = slavestream.\
                            select(station=slave_pick.waveform_id.station_code,
                                   channel='*'+slave_pick.waveform_id.
                                   channel_code[-1])[0]
                    else:
                        print('No slave data for ' +
                              slave_pick.waveform_id.station_code + '.' +
                              slave_pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code +
                              '.' + pick.waveform_id.channel_code +
                              ' ' + slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc =\
                            xcorr_pick_correction(pick.time, mastertr,
                                                  slave_pick.time,
                                                  slavetr, pre_pick,
                                                  extract_len - pre_pick,
                                                  shift_len, filter="bandpass",
                                                  filter_options={'freqmin':
                                                                  lowcut,
                                                                  'freqmax':
                                                                  highcut},
                                                  plot=plotvar)
                        # Get the differential travel time using the
                        # corrected time.
                        # Check that the correction is within the allowed shift
                        # This can occur in the obspy routine when the
                        # correlation function is increasing at the end of the
                        # window.
                        if abs(correction) > shift_len:
                            warnings.warn('Shift correction too large, ' +
                                          'will not use')
                            continue
                        correction = (pick.time - master_ori_time) -\
                            (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc * cc >= coh_thresh:
                            if coherence_weight:
                                weight = cc * cc
                            else:
                                weight = cc
                            phases += 1
                            # added by Caro
                            event_text += pick.waveform_id.station_code.\
                                ljust(5) + _cc_round(correction, 3).\
                                rjust(11) + _cc_round(weight, 3).rjust(8) +\
                                ' '+pick.phase_hint+'\n'
                            event_text2 += pick.waveform_id.station_code\
                                .ljust(5).upper() +\
                                _cc_round(correction, 3).rjust(11) +\
                                _cc_round(weight, 3).rjust(8) +\
                                ' '+pick.phase_hint+'\n'

                            # links+=1
                        corr_list.append(cc*cc)
                    except Exception:
                        msg = "Couldn't compute correlation correction"
                        warnings.warn(msg)
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
                f2.write(event_text2)
    if plotvar:
        plt.hist(corr_list, 150)
        plt.show()
    # f.write('\n')
    f.close()
    f2.close()
    return
Example #10
    # Requires: obspy.Trace, obspy.signal.filter.bandpass,
    # obspy.signal.invsim.cosine_taper, plus the project-local helpers
    # prep_data() and rolling_window().
    def moving_window(reference, stream_waves, t_before_p, t_after_p, lp, hp,
                      windowing):

        target, p1 = prep_data(reference)

        DECORR = []
        CORR = []
        STIME = []
        Traces_good_cc = []
        Targets = []

        stream_waves.sort()

        for ist in stream_waves:

            if not ist == reference:

                ij_trace, ij_pick = prep_data(ist)
                ## Performing a cross-correlation for aligning the two seismograms at
                ## the maximum cross-correlation coefficient.
                lag_time, coeff = xcorr_pick_correction(
                    p1,
                    target,
                    ij_pick,
                    ij_trace,
                    t_before=0.05,
                    t_after=4.0,
                    cc_maxlag=1.0,
                    filter="bandpass",
                    filter_options={
                        'freqmin': lp,
                        'freqmax': hp
                    },
                    plot=False,
                )
                # coeff >= 0.9699
                if coeff >= 0.90:

                    STIME.append(ij_trace.stats.starttime)
                    print("Reference trace vs %s, CC = %s" % (ist, coeff))

                    # Correcting both seismograms
                    corrected_target = target.trim(p1 - (t_before_p),
                                                   p1 + (t_after_p))
                    corrected_event2 = ij_trace.trim(
                        ij_pick - (t_before_p - lag_time),
                        ij_pick + (t_after_p + lag_time))

                    # Applying a cosine taper
                    corrected_target.data *= cosine_taper(
                        len(corrected_target), 0.1)
                    corrected_event2.data *= cosine_taper(
                        len(corrected_event2), 0.1)

                    # Sampling rate for the reference and the second waveform
                    sp_t = corrected_target.stats.sampling_rate
                    st_ev2 = corrected_event2.stats.sampling_rate

                    # Now, we need to filter the traces before measuring de-correlation index
                    corr_tar_filt = Trace(
                        bandpass(corrected_target,
                                 lp,
                                 hp,
                                 sp_t,
                                 corners=4,
                                 zerophase=True))
                    corr_tar2_filt = Trace(
                        bandpass(corrected_event2,
                                 lp,
                                 hp,
                                 st_ev2,
                                 corners=4,
                                 zerophase=True))

                    # Interpolating to 1000 Hz to make a smooth measurement
                    corr_tar_filt_int = corr_tar_filt.resample(
                        sampling_rate=1000, )
                    corr_tar2_filt_int = corr_tar2_filt.resample(
                        sampling_rate=1000, )

                    # Checking the length of the waveforms. They must correspond
                    l1, l2 = len(corr_tar_filt), len(corr_tar2_filt)

                    if l1 != l2:
                        _msg = "Waveforms have different window_lenght, check data = " + ist
                        raise IOError(_msg)

                    else:
                        a = corr_tar_filt_int
                        b = corr_tar2_filt_int

                    # Before sliding, let's create a copy of the traces
                    a_c = a.copy()
                    b_c = b.copy()

                    Traces_good_cc.append(b_c)
                    Targets.append(a_c)

                    # Window length and step function for the moving window.
                    wl = windowing[0] * 100
                    stp = windowing[1] * 100
                    decorr, corr = rolling_window(a, b, wlen=wl, stp=stp)

                    DECORR.append(decorr)
                    CORR.append(corr)

        return DECORR, CORR, STIME, Traces_good_cc, Targets
Example #11
import obspy
from obspy.signal.cross_correlation import xcorr_pick_correction


# read example data of two small earthquakes
st1 = obspy.read("https://examples.obspy.org/BW.UH1..EHZ.D.2010.147.a.slist.gz")
st2 = obspy.read("https://examples.obspy.org/BW.UH1..EHZ.D.2010.147.b.slist.gz")
# select the single traces to use in correlation.
# to avoid artifacts from preprocessing there should be some data left and
# right of the short time window actually used in the correlation.
tr1 = st1.select(component="Z")[0]
tr2 = st2.select(component="Z")[0]
# these are the original pick times set during routine analysis
t1 = obspy.UTCDateTime("2010-05-27T16:24:33.315000Z")
t2 = obspy.UTCDateTime("2010-05-27T16:27:30.585000Z")

# estimate the time correction for pick 2 without any preprocessing and open
# a plot window to visually validate the results
dt, coeff = xcorr_pick_correction(t1, tr1, t2, tr2, 0.05, 0.2, 0.1, plot=True)
print("No preprocessing:")
print("  Time correction for pick 2: %.6f" % dt)
print("  Correlation coefficient: %.2f" % coeff)
# estimate the time correction with bandpass prefiltering
dt, coeff = xcorr_pick_correction(t1, tr1, t2, tr2, 0.05, 0.2, 0.1, plot=True,
                                  filter="bandpass",
                                  filter_options={'freqmin': 1, 'freqmax': 10})
print("Bandpass prefiltering:")
print("  Time correction for pick 2: %.6f" % dt)
print("  Correlation coefficient: %.2f" % coeff)
Ejemplo n.º 16
0
def write_correlations(event_list,
                       wavbase,
                       extract_len,
                       pre_pick,
                       shift_len,
                       lowcut=1.0,
                       highcut=10.0,
                       max_sep=4,
                       min_link=8,
                       coh_thresh=0.0,
                       coherence_weight=True,
                       plotvar=False):
    """
    Function to write a dt.cc file for hypoDD input - takes an input list of
    events and computes pick refienements by correlation.

    :type event_list: list of tuple
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: str
    :param wavbase: Path to the seisan wave directory that the wavefiles in the
                    S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - deafult=10.0
    :type max_sep: float
    :param max_sep: Maximum seperation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type coherence_weight: bool
    :param coherence_weight: Use coherence to weight the dt.cc file, or the \
        raw cross-correlation value, defaults to false which uses the cross-\
        correlation value.
    :type plotvar: bool
    :param plotvar: To show the pick-correction plots, defualts to False.

    .. warning:: This is not a fast routine!

    .. warning:: In contrast to seisan's \
        corr routine, but in accordance with the hypoDD manual, this outputs \
        corrected differential time.

    .. note:: Currently we have not implemented a method for taking \
        unassociated event objects and wavefiles.  As such if you have events \
        with associated wavefiles you are advised to generate Sfiles for each \
        event using the sfile_util module prior to this step.
    """
    import obspy
    if int(obspy.__version__.split('.')[0]) > 0:
        from obspy.signal.cross_correlation import xcorr_pick_correction
    else:
        from obspy.signal.cross_correlation import xcorrPickCorrection \
            as xcorr_pick_correction
    import matplotlib.pyplot as plt
    from obspy import read
    from eqcorrscan.utils.mag_calc import dist_calc
    import glob
    import warnings

    corr_list = []
    f = open('dt.cc', 'w')
    f2 = open('dt.cc2', 'w')
    for i, master in enumerate(event_list):
        master_sfile = master[1]
        master_event_id = master[0]
        master_picks = sfile_util.readpicks(master_sfile).picks
        master_event = sfile_util.readheader(master_sfile)
        master_ori_time = master_event.origins[0].time
        master_location = (master_event.origins[0].latitude,
                           master_event.origins[0].longitude,
                           master_event.origins[0].depth)
        master_wavefiles = sfile_util.readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + master_wavefiles[0])
        if masterpath:
            masterstream = read(masterpath[0])
        if len(master_wavefiles) > 1:
            for wavefile in master_wavefiles:
                try:
                    masterstream += read(os.join(wavbase, wavefile))
                except:
                    continue
                    raise IOError("Couldn't find wavefile")
        for j in range(i + 1, len(event_list)):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            slave_event_id = event_list[j][0]
            slave_wavefiles = sfile_util.readwavename(slave_sfile)
            try:
                # slavestream=read(wavbase+'/*/*/'+slave_wavefiles[0])
                slavestream = read(wavbase + os.sep + slave_wavefiles[0])
            except:
                # print(slavestream)
                raise IOError('No wavefile found: ' + slave_wavefiles[0] +
                              ' ' + slave_sfile)
            if len(slave_wavefiles) > 1:
                for wavefile in slave_wavefiles:
                    # slavestream+=read(wavbase+'/*/*/'+wavefile)
                    try:
                        slavestream += read(wavbase + '/' + wavefile)
                    except:
                        continue
            # Write out the header line
            event_text = '#'+str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10)+' 0.0   \n'
            event_text2 = '#'+str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10)+' 0.0   \n'
            slave_picks = sfile_util.readpicks(slave_sfile).picks
            slave_event = sfile_util.readheader(slave_sfile)
            slave_ori_time = slave_event.origins[0].time
            slave_location = (slave_event.origins[0].latitude,
                              slave_event.origins[0].longitude,
                              slave_event.origins[0].depth)
            if dist_calc(master_location, slave_location) > max_sep:
                continue
            links = 0
            phases = 0
            for pick in master_picks:
                if pick.phase_hint[0].upper() not in ['P', 'S']:
                    continue
                    # Only use P and S picks, not amplitude or 'other'
                # Find station, phase pairs
                # Added by Carolin
                slave_matches = [
                    p for p in slave_picks
                    if p.phase_hint == pick.phase_hint and
                    p.waveform_id.station_code == pick.waveform_id.station_code
                ]

                if masterstream.select(station=pick.waveform_id.station_code,
                                       channel='*' +
                                       pick.waveform_id.channel_code[-1]):
                    mastertr = masterstream.\
                        select(station=pick.waveform_id.station_code,
                               channel='*' +
                               pick.waveform_id.channel_code[-1])[0]
                else:
                    print('No waveform data for ' +
                          pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code)
                    print(pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code + ' ' + slave_sfile +
                          ' ' + master_sfile)
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(
                            station=slave_pick.waveform_id.station_code,
                            channel='*' +
                            slave_pick.waveform_id.channel_code[-1]):
                        slavetr = slavestream.\
                            select(station=slave_pick.waveform_id.station_code,
                                   channel='*'+slave_pick.waveform_id.
                                   channel_code[-1])[0]
                    else:
                        print('No slave data for ' +
                              slave_pick.waveform_id.station_code + '.' +
                              slave_pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code + ' ' +
                              slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc =\
                            xcorr_pick_correction(pick.time, mastertr,
                                                  slave_pick.time,
                                                  slavetr, pre_pick,
                                                  extract_len - pre_pick,
                                                  shift_len, filter="bandpass",
                                                  filter_options={'freqmin':
                                                                  lowcut,
                                                                  'freqmax':
                                                                  highcut},
                                                  plot=plotvar)
                        # Get the differntial travel time using the
                        # corrected time.
                        # Check that the correction is within the allowed shift
                        # This can occur in the obspy routine when the
                        # correlation function is increasing at the end of the
                        # window.
                        if abs(correction) > shift_len:
                            warnings.warn('Shift correction too large, ' +
                                          'will not use')
                            continue
                        correction = (pick.time - master_ori_time) -\
                            (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc * cc >= coh_thresh:
                            if coherence_weight:
                                weight = cc * cc
                            else:
                                weight = cc
                            phases += 1
                            # added by Caro
                            event_text += pick.waveform_id.station_code.\
                                ljust(5) + _cc_round(correction, 3).\
                                rjust(11) + _cc_round(weight, 3).rjust(8) +\
                                ' '+pick.phase_hint+'\n'
                            event_text2 += pick.waveform_id.station_code\
                                .ljust(5).upper() +\
                                _cc_round(correction, 3).rjust(11) +\
                                _cc_round(weight, 3).rjust(8) +\
                                ' '+pick.phase_hint+'\n'

                        corr_list.append(cc * cc)
                    except Exception:
                        msg = "Couldn't compute correlation correction"
                        warnings.warn(msg)
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
                f2.write(event_text2)
    if plotvar:
        plt.hist(corr_list, 150)
        plt.show()
    f.close()
    f2.close()
    return
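
The sign convention in the differential-time line above is easy to get backwards. A minimal sketch of the same arithmetic, reusing the loop's variable names with hypothetical picks and origin times (every value below is invented for illustration):

from obspy import UTCDateTime

# Hypothetical picks and origin times for a master/slave event pair.
master_ori_time = UTCDateTime("2014-01-01T00:00:00")
slave_ori_time = UTCDateTime("2014-01-01T06:00:00")
master_pick = master_ori_time + 12.30  # 12.30 s travel time
slave_pick = slave_ori_time + 12.45    # 12.45 s travel time
correction = -0.05                     # dt from xcorr_pick_correction

# Differential travel time: master travel time minus the corrected
# slave travel time, mirroring the loop above.
diff_time = (master_pick - master_ori_time) - \
    (slave_pick + correction - slave_ori_time)
print(diff_time)  # 12.30 - 12.40 = -0.10 s
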
Ejemplo n.º 17
0
def pro4statics(eq_file,
                use_ref_trace=0,
                ref_trace='nothing',
                event_no=0,
                dphase='PcP',
                dphase2='PKiKP',
                dphase3='P',
                dphase4='PP',
                start_beam=-1,
                end_beam=3,
                plot_scale_fac=0.05,
                start_buff=-10,
                end_buff=30,
                qual_threshold=0,
                corr_threshold=0,
                max_time_shift=2,
                min_dist=17,
                max_dist=21,
                ARRAY=0,
                auto_dist=1):

    from obspy import UTCDateTime
    from obspy.signal.cross_correlation import xcorr_pick_correction
    from obspy import Stream
    from obspy import Trace
    from obspy import read
    from obspy.geodetics import gps2dist_azimuth
    import numpy as np
    import os
    import sys
    from obspy.taup import TauPyModel
    import matplotlib.pyplot as plt
    model = TauPyModel(model='iasp91')

    import warnings  # don't show any warnings
    if not sys.warnoptions:
        warnings.simplefilter("ignore")

    print('pro4statics is starting')

    #%% Get station location file
    if ARRAY == 0:  # Hinet set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_hinet.txt'
    elif ARRAY == 1:  # LASA set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_LASA.txt'
    elif ARRAY == 2:  # China set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_ch.txt'
    with open(sta_file, 'r') as file:
        lines = file.readlines()
    print('    ' + str(len(lines)) + ' stations read from ' + sta_file)
    # Load station coords into arrays
    # old line: station_index = range(343)
    station_index = range(len(lines))
    st_lats = []
    st_lons = []
    st_deps = []
    st_names = []
    for ii in station_index:
        line = lines[ii]
        split_line = line.split()
        st_names.append(split_line[0])
        st_lats.append(split_line[1])
        st_lons.append(split_line[2])
        st_deps.append(split_line[3])

    if ARRAY == 0:  # stupid kludge to reduce Hi-net names by one letter and equalize capitalization
        for ii in station_index:
            tested_name = st_names[ii]
            this_name_truc = tested_name[0:5]
            name_truc_cap = this_name_truc.upper()
            st_names[ii] = name_truc_cap

    # initialize lists of statics
    sta_names = []
    sta_dists = []
    sta_lats = []
    sta_lons = []
    sta_statics = []
    sta_corrs = []

    #%% Parameter list
    #dphase  = 'PKIKP'       # phase to be aligned
    #dphase2 = 'PKiKP'      # another phase to have traveltime plotted
    #dphase3 = 'PKP'        # another phase to have traveltime plotted
    #dphase4 = 'pP'        # another phase to have traveltime plotted
    #ref_trace = 'N.SZW'   # trace with reference waveform
    #start_beam = 2       # start of correlation window (more positive is earlier)
    start_beam = -start_beam
    #end_beam   = 7       # plots end Xs before PKiKP
    #max_time_shift = 2       # searches up to this time shift for alignment
    #corr_threshold = 0.  # threshold that correlation is good enough to keep trace
    #max_dist = 151
    #min_dist = 150.6
    #plot_scale_fac = 0.2    #  Bigger numbers make each trace amplitude bigger on plot
    #qual_threshold =  0 # minimum SNR
    plot_tt = True  # plot the traveltimes?
    plot_flag = False  # plot for each trace?  Watch out, can be lots, one for each station pair!!
    min_dist_auto = 180  # for use in auto-scaling y axis in trace gathers
    max_dist_auto = 0

    #%% Get saved event info, also used to name files
    #  event 2016-05-28T09:47:00.000 -56.241 -26.935 78
    file = open('/Users/vidale/Documents/PyCode/EvLocs/' + eq_file, 'r')
    lines = file.readlines()
    split_line = lines[0].split()
    #            ids.append(split_line[0])  ignore label, now "event"
    t = UTCDateTime(split_line[1])
    date_label = split_line[1][0:10]
    ev_lat = float(split_line[2])
    ev_lon = float(split_line[3])
    ev_depth = float(split_line[4])

    print('        Date label ' + date_label + ' lat ' + str(ev_lat) +
          ' lon ' + str(ev_lon))

    st = Stream()
    #    fname     = 'HD' + date_label + '.mseed'
    fname = 'HD' + date_label + 'sel.mseed'  # sel file has windowing, shift?, filtering

    print('        File ' + fname)

    os.chdir('/Users/vidale/Documents/PyCode/Pro_Files/')
    os.system('pwd')
    st = read(fname)
    print('    ' + str(len(st)) + '  traces read in')
    print('         First trace has : ' + str(len(st[0].data)) + ' time pts ')

    #%% Reference trace
    trim_start = t + start_buff
    trim_end = t + end_buff
    time_buff = end_buff - start_buff
    tr_ref = Trace()
    #%% Stack reference trace
    if use_ref_trace == 0:
        counter = 0
        for tr in st:  # loop over seismograms to find reference trace, put it in tr_ref
            if counter == 0:  # copy first trace to stack
                tr_ref = tr.copy()
                tr_ref.stats.station = 'STACK'
                tr_ref.trim(starttime=trim_start - time_buff,
                            endtime=trim_end + time_buff)
                nt_ref = len(tr_ref.data)
                tr_ref.normalize()
                counter = counter + 1
            else:  # add the rest of the traces to stack
                tr_add = tr.copy()
                tr_add.trim(starttime=trim_start - time_buff,
                            endtime=trim_end + time_buff)
                nt_add = len(tr_add.data)
                tr_add.normalize()

                if nt_ref != nt_add:  # are seismograms the same length?
                    print('trying to stack seismograms of different '
                          'lengths, debug!')
                for it in range(nt_ref):  # add seismogram one point at a time
                    tr_ref.data[it] += tr_add.data[it]
                counter = counter + 1
        tr_ref.data = tr_ref.data / counter

    #%% Pick reference trace
    if use_ref_trace == 1:
        for tr in st:  # loop over seismograms to find reference trace, put it in tr_ref
            if (tr.stats.station == ref_trace):  # found it
                tr_ref = tr.copy()
                tr_ref.trim(starttime=trim_start - time_buff,
                            endtime=trim_end + time_buff)
                nt_ref = len(tr_ref.data)
                tr_ref.normalize()
                print('        found reference station ' + tr.stats.station)
    if len(tr_ref.data) == 0:
        sys.exit('Reference trace empty, will not work!')

    #%% Plot reference trace
    plt.close(4)
    plt.figure(4, figsize=(10, 10))
    plt.xlim(start_buff, end_buff)
    plt.ylim(min(tr_ref.data), max(tr_ref.data))

    time = np.arange(nt_ref) * tr_ref.stats.delta + start_buff
    plt.plot(time, tr_ref.data, color='black')
    plt.xlabel('Time (s)')
    if use_ref_trace == 1:
        plt.title('Reference trace ' + dphase + ' for ' + fname[2:12] + '  ' +
                  ref_trace)
        plt.ylabel('Normed amp')
    else:
        plt.title('Summed reference trace ' + dphase + ' for ' + fname[2:12] +
                  '   ' + str(event_no))
        plt.ylabel('Average amp, each trace normed to 1')
    plt.show()

    stgood = Stream()
    st2 = st.copy()  # keep an unshifted copy for the pre-alignment plot
    # print('st2 has: ' + str(len(st)) + ' traces' + ' t (origin time) ' + str(t))
    print('        Ref time ' + str(t) +
          ' start_beam end_beam max_time_shift ' + str(start_beam) + '  ' +
          str(end_beam) + '  ' + str(max_time_shift) + '  ')

    #  get station lat-lon, compute distance for plot
    good_corr = 0
    bad_corr = 0
    for tr in st:  # do all seismograms
        if tr.stats.station in st_names:  # find station in inventory
            ii = st_names.index(tr.stats.station)
            #  print('found Station ' + this_name + '  ' + actual_trace)
            stalon = float(st_lons[ii])  # look up lat & lon to find distance
            stalat = float(st_lats[ii])
            distance = gps2dist_azimuth(stalat, stalon, ev_lat, ev_lon)
            tr.stats.distance = distance[0] / (
                1000. * 111)  # distance for phase time and plotting

            if tr.stats.distance < min_dist_auto:  # for auto-scaling y-axis in trace gather plots
                min_dist_auto = tr.stats.distance
            if tr.stats.distance > max_dist_auto:
                max_dist_auto = tr.stats.distance

            arrivals = model.get_travel_times(
                source_depth_in_km=ev_depth,
                distance_in_degree=tr.stats.distance,
                phase_list=[dphase])
            #                 print(tr.stats.station + '  ' + tr_ref.stats.station + ' start_corr ' +
            #                    str(start_beam) + ' end ' + str(end_beam))
            try:
                dt, coeff = xcorr_pick_correction(t,
                                                  tr_ref,
                                                  t,
                                                  tr,
                                                  start_beam,
                                                  end_beam,
                                                  max_time_shift,
                                                  plot=plot_flag)
                if abs(dt) > max_time_shift:
                    print('Hey!  Excess shift: %.3f' % dt)
                    print('Station ' + tr.stats.station + ' corr is ' +
                          str(coeff))
                if coeff > 1:
                    print('Hey!  Excess coeff: %.3f' % coeff)
                    print('Station ' + tr.stats.station + ' corr is ' +
                          str(coeff))
                if coeff > corr_threshold:
                    good_corr += 1
                    if plot_flag:
                        print('Time correction for pick 2: %.6f' % dt)
                        print('Correlation coefficient: %.2f' % coeff)
                    tr.stats.starttime -= dt
                    sta_names.extend([tr.stats.station])
                    sta_dists.extend([tr.stats.distance])
                    sta_lats.extend([stalat])
                    sta_lons.extend([stalon])
                    sta_statics.extend([dt])
                    sta_corrs.extend([coeff])
                    stgood += tr
                else:
                    bad_corr += 1
            except Exception:
                print('        No time shift for ' + tr.stats.station +
                      ' at distance ' + str(tr.stats.distance))

    # TODO: optionally write out station_name, dt, coeff for shifts that
    # pass corr_threshold, and record the shifted waveform in stgood
    print('    ' + str(good_corr) + ' traces with good correlation')
    if good_corr == 0:
        sys.exit('No traces passed the correlation threshold, quitting')
    print('    ' + str(bad_corr) + '  traces with bad correlation')
    print('    ' + str(good_corr + bad_corr) + ' out of total')
    print('        corr threshold is ' + str(corr_threshold))

    plt.close(5)
    plt.figure(5, figsize=(10, 10))
    plt.xlim(start_buff, end_buff)
    plt.ylim(min_dist, max_dist)

    if auto_dist == 1:
        dist_diff = max_dist_auto - min_dist_auto  # add space at extremes
        plt.ylim(min_dist_auto - 0.1 * dist_diff,
                 max_dist_auto + 0.1 * dist_diff)
    else:
        plt.ylim(min_dist, max_dist)

    for tr in stgood:
        dist_offset = tr.stats.distance  # trying for approx degrees
        time = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime -
                                                           t)
        plt.plot(time, (tr.data - np.median(tr.data)) * plot_scale_fac /
                 (tr.data.max() - tr.data.min()) + dist_offset,
                 color='black')

    #%% Plot traveltime curves on the post-alignment figure
    if plot_tt:
        # first traveltime curve
        line_pts = 50
        dist_vec = np.arange(min_dist, max_dist,
                             (max_dist - min_dist) / line_pts)  # distance grid
        time_vec1 = np.arange(
            min_dist, max_dist, (max_dist - min_dist) /
            line_pts)  # time grid of same length; values overwritten below
        for i in range(0, line_pts):
            arrivals = model.get_travel_times(source_depth_in_km=ev_depth,
                                              distance_in_degree=dist_vec[i],
                                              phase_list=[dphase])
            num_arrivals = len(arrivals)
            found_it = 0
            for j in range(0, num_arrivals):
                if arrivals[j].name == dphase:
                    time_vec1[i] = arrivals[j].time
                    found_it = 1
            if found_it == 0:
                time_vec1[i] = np.nan
    # second traveltime curve
        if dphase2 != 'no':
            time_vec2 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # time grid of same length; values overwritten below
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase2])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase2:
                        time_vec2[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec2[i] = np.nan
            plt.plot(time_vec2, dist_vec, color='orange')
        # third traveltime curve
        if dphase3 != 'no':
            time_vec3 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # time grid of same length; values overwritten below
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase3])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase3:
                        time_vec3[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec3[i] = np.nan
            plt.plot(time_vec3, dist_vec, color='yellow')
        # fourth traveltime curve
        if dphase4 != 'no':
            time_vec4 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # time grid of same length; values overwritten below
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase4])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase4:
                        time_vec4[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec4[i] = np.nan
            plt.plot(time_vec4, dist_vec, color='purple')

        plt.plot(time_vec1, dist_vec, color='blue')
        plt.show()

    plt.xlabel('Time (s)')
    plt.ylabel('Epicentral distance from event (°)')
    plt.title('Post-alignment ' + dphase + ' for ' + fname[2:12] + '   ' +
              str(event_no))
    plt.show()

    # plot traces
    plt.close(6)
    plt.figure(6, figsize=(10, 10))
    plt.xlim(start_buff, end_buff)
    plt.ylim(min_dist, max_dist)

    if auto_dist == 1:
        dist_diff = max_dist_auto - min_dist_auto  # add space at extremes
        plt.ylim(min_dist_auto - 0.1 * dist_diff,
                 max_dist_auto + 0.1 * dist_diff)
        max_dist = max_dist_auto
        min_dist = min_dist_auto
    else:
        plt.ylim(min_dist, max_dist)

    for tr in st2:  # regenerate distances into st2 as they were loaded into st for plots
        if tr.stats.station in st_names:  # find station in station list
            ii = st_names.index(tr.stats.station)
            stalon = float(st_lons[ii])  # look up lat & lon to find distance
            stalat = float(st_lats[ii])
            distance = gps2dist_azimuth(stalat, stalon, ev_lat, ev_lon)
            tr.stats.distance = distance[0] / (
                1000. * 111)  # distance for phase time and plotting

    for tr in st2:  # generate plot
        dist_offset = tr.stats.distance  # trying for approx degrees
        time = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime -
                                                           t)
        plt.plot(time, (tr.data - np.median(tr.data)) * plot_scale_fac /
                 (tr.data.max() - tr.data.min()) + dist_offset,
                 color='black')

    #%% Plot traveltime curves on the pre-alignment figure
    if plot_tt:
        # first traveltime curve
        line_pts = 50
        dist_vec = np.arange(min_dist, max_dist,
                             (max_dist - min_dist) / line_pts)  # distance grid
        time_vec1 = np.arange(
            min_dist, max_dist, (max_dist - min_dist) /
            line_pts)  # time grid of same length; values overwritten below
        for i in range(0, line_pts):
            arrivals = model.get_travel_times(source_depth_in_km=ev_depth,
                                              distance_in_degree=dist_vec[i],
                                              phase_list=[dphase])
            num_arrivals = len(arrivals)
            found_it = 0
            for j in range(0, num_arrivals):
                if arrivals[j].name == dphase:
                    time_vec1[i] = arrivals[j].time
                    found_it = 1
            if found_it == 0:
                time_vec1[i] = np.nan
    # second traveltime curve
        if dphase2 != 'no':
            time_vec2 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # time grid of same length; values overwritten below
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase2])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase2:
                        time_vec2[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec2[i] = np.nan
            plt.plot(time_vec2, dist_vec, color='orange')
        # third traveltime curve
        if dphase3 != 'no':
            time_vec3 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # time grid of same length; values overwritten below
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase3])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase3:
                        time_vec3[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec3[i] = np.nan
            plt.plot(time_vec3, dist_vec, color='yellow')
        # fourth traveltime curve
        if dphase4 != 'no':
            time_vec4 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # time grid of same length; values overwritten below
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase4])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase4:
                        time_vec4[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec4[i] = np.nan
            plt.plot(time_vec4, dist_vec, color='purple')

        plt.plot(time_vec1, dist_vec, color='blue')
        plt.show()

    plt.xlabel('Time (s)')
    plt.ylabel('Epicentral distance from event (°)')
    plt.title('Pre-alignment ' + dphase + ' for ' + fname[2:12] + '   ' +
              str(event_no))
    plt.show()

    #  Save stats
    fname_stats = '/Users/vidale/Documents/PyCode/Mseed/fine_statics.txt'

    #  Save station static correction files
    #fname_stats = 'Statics' + etime[:10] + dphase + ref_trace + '.txt'
    stats_file = open(fname_stats, 'w')
    len_file1 = len(sta_names)
    for j in range(0, len_file1):
        dist_str = '{:.2f}'.format(
            sta_dists[j])  # 2 digits after decimal place
        lat_str = '{:.4f}'.format(sta_lats[j])  # 4 digits after decimal place
        lon_str = '{:.4f}'.format(sta_lons[j])
        stat_str = '{:.3f}'.format(sta_statics[j])
        corr_str = '{:.3f}'.format(sta_corrs[j])
        write_line = sta_names[
            j] + ' ' + dist_str + ' ' + lat_str + ' ' + lon_str + ' ' + stat_str + ' ' + corr_str + '\n'
        stats_file.write(write_line)
    stats_file.close()
    file.close()
    # print('    ' + str(len_file1) + '  traces are in correlation file')


#     os.system('say "Done"')
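
The point-by-point stacking loop in pro4statics can be collapsed into one vectorized NumPy step. A minimal sketch on synthetic traces (the stream here is random stand-in data, not the author's):

import numpy as np
from obspy import Trace, Stream

# Synthetic stand-in for the trimmed stream used in pro4statics.
rng = np.random.default_rng(0)
st = Stream([Trace(data=rng.standard_normal(100)) for _ in range(5)])

# Normalize copies of every trace and average them in one vectorized
# step, equivalent to the per-sample stacking loop above.
tr_ref = st[0].copy()
tr_ref.stats.station = 'STACK'
tr_ref.data = np.mean([tr.copy().normalize().data for tr in st], axis=0)
print(tr_ref.stats.station, tr_ref.stats.npts)
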
Ejemplo n.º 18
0
def get_spec_ratio(data_small, mdata, data_large, station, args, debug=0):
    """
	generate spectral ratios
	preprocessing of Kiban network metadata.
	DO NOT USE
	function only works if you named your files exactly like I did -
	TODO
	update to take in all file types regardless of name type
	"""
    flims = [-1, np.log10(50)]  # limits of the log binning
    # unpack arguments
    prefilter = args.F
    stype = args.P  # phase used to generate the spectral ratio
    winlength = args.W
    shift = args.S
    nwins = args.N
    nlogbins = args.lb
    fftype = args.ft
    edata = data_small[0]
    #load data from small and large events
    #	ms_small=data_small[0]
    #	ela=float(ms_small.split('_evla')[-1].split('_')[0])
    #	elo=float(ms_small.split('_lo')[1].split('_')[0])
    #	edp=float(ms_small.split('_dp')[1].split('_')[0])
    #	pick_small=data_small[1]
    #	stla=float(ms_small.split('stla')[-1].split('_')[0])
    #	stlo=float(ms_small.split('_lo')[-1].split('.ms')[0])
    #	egf_name=ms_small.split('evt_')[-1].split('_evla')[0]
    #	master_name=ms_large.split('evt_')[-1].split('_evla')[0]
    #	mla=float(ms_large.split('_evla')[-1].split('_')[0])
    #	mlo=float(ms_large.split('_lo')[1].split('_')[0])
    #	mdp=float(ms_large.split('_dp')[1].split('_')[0])
    #	edata=read(ms_small)
    #	mdata=read(ms_large)
    ela = data_small[1].la
    elo = data_small[1].lo
    edp = data_small[1].dp
    pick_small = data_small[1].pick
    stla = data_small[1].stla
    stlo = data_small[1].stlo
    egf_name = data_small[1].name
    master_name = data_large.name
    mla = data_large.la
    mlo = data_large.lo
    mdp = data_large.dp
    #select the channel for the data
    #use alternate names for channels from different sources
    echannels = [tr.stats.channel for tr in edata]
    mchannels = [tr.stats.channel for tr in mdata]
    empty_specrat = specrat(np.asarray([]), np.asarray([]), np.asarray([]),
                            master_name, mla, mlo, egf_name, ela, elo, station,
                            stla, stlo, args.C)
    if args.C not in echannels and not set(echannels) & set(args.altC):
        if debug > 0:
            print("didn't find this channel in egf at station " + station)
        return empty_specrat
    if args.C not in mchannels and not set(mchannels) & set(args.altC):
        if debug > 0:
            print("didn't find this channel in master at station " + station)
        return empty_specrat
    allchans = list(args.altC) + [args.C]  # avoid mutating args.altC
    echannel = list(set(echannels) & set(allchans))[0]
    mchannel = list(set(mchannels) & set(allchans))[0]
    edata = edata.select(channel=echannel)[0]
    mdata = mdata.select(channel=mchannel)[0]
    mdata.detrend()
    mdata.detrend('demean')
    edata.detrend()
    edata.detrend('demean')
    if prefilter:
        #		flims=np.log10(prefilter)
        edata.filter(type='bandpass',
                     freqmin=prefilter[0],
                     freqmax=prefilter[1])
        mdata.filter(type='bandpass',
                     freqmin=prefilter[0],
                     freqmax=prefilter[1])
    #find the correct picks based on phase desired
#	pick_large=str(pick_large)
    pick_large = data_large.pick
    #	pick_small=read_events(pick_small)
    #	pick_small=pick_small[0].picks
    #	pick_large = read_events(pick_large)
    #	pick_large = pick_large[0].picks
    #	print(station)
    spick_large = [
        ipick for ipick in pick_large if ipick.phase_hint == 'S'
        and ipick.waveform_id.station_code[-4:] == station[-4:]
    ]
    spick_small = [
        ipick for ipick in pick_small if ipick.phase_hint == 'S'
        and ipick.waveform_id.station_code[-4:] == station[-4:]
    ]
    ppick_large = [
        ipick for ipick in pick_large if ipick.phase_hint == 'P'
        and ipick.waveform_id.station_code[-4:] == station[-4:]
    ]
    ppick_small = [
        ipick for ipick in pick_small if ipick.phase_hint == 'P'
        and ipick.waveform_id.station_code[-4:] == station[-4:]
    ]
    if stype in ('S', 'coda', 'lateS'):
        pick_large = spick_large
        pick_small = spick_small
    if stype == 'P':
        pick_large = ppick_large
        pick_small = ppick_small
    if len(pick_large) < 1 or len(pick_small) < 1:
        if debug > 0:
            print('no pick found in the pickfile for station ' + station)
        return empty_specrat
    print(pick_large[0].waveform_id.station_code)
    print(pick_small[0].waveform_id.station_code)
    pick_large = pick_large[0].time
    pick_small = pick_small[0].time
    #	print('pick large time: ')
    #	print(pick_large)
    #	print(mdata.stats.starttime)
    #	print(mdata.stats.endtime)
    #	print('pick smal time')
    #	print(pick_small)
    #	print(edata.stats.starttime)
    #	print(edata.stats.endtime)
    try:
        if stype in ('coda', 'S', 'lateS'):
            dt, coeff = xcorr_pick_correction(pick_large,
                                              mdata,
                                              pick_small,
                                              edata,
                                              t_before=0.25,
                                              t_after=1.0,
                                              cc_maxlag=1.5,
                                              filter="bandpass",
                                              filter_options={
                                                  'freqmin': 0.5,
                                                  'freqmax': 5.0
                                              })
        else:
            dt, coeff = xcorr_pick_correction(pick_large,
                                              mdata,
                                              pick_small,
                                              edata,
                                              t_before=0.25,
                                              t_after=1.0,
                                              cc_maxlag=1.5,
                                              filter="bandpass",
                                              filter_options={
                                                  'freqmin': 0.5,
                                                  'freqmax': 10.0
                                              })
    except Exception:
        logging.exception('values at exception:')
        return empty_specrat
    pick_small = pick_small + dt
    if stype == 'coda':
        ttime_l = pick_large - mdata.stats.starttime
        ttime_s = pick_small - edata.stats.starttime
        pick_large = mdata.stats.starttime + 1.5 * ttime_l
        pick_small = edata.stats.starttime + 1.5 * ttime_s
    if stype == 'lateS':
        pick_large = pick_large + 2
        pick_small = pick_small + 2
    # generate P-coda noise window (fall back to trace start if no S pick)
    if spick_large and spick_small:
        picknp_large = spick_large[0].time - winlength - 1
        picknp_small = spick_small[0].time - winlength - 1
    else:
        picknp_large = mdata.stats.starttime
        picknp_small = edata.stats.starttime
    #generate pre p noise window
    if len(ppick_large) < 1:
        pickn_large = mdata.stats.starttime
    else:
        pickn_large = ppick_large[0].time - winlength - 1
    if len(ppick_small) < 1:
        pickn_small = edata.stats.starttime
    else:
        pickn_small = ppick_small[0].time - winlength - 1
    # clamp both noise windows to the start of the data
    if pickn_large - mdata.stats.starttime < 0:
        pickn_large = mdata.stats.starttime
    if pickn_small - edata.stats.starttime < 0:
        pickn_small = edata.stats.starttime
    ts1 = mdata.copy()
    ts1.trim(pick_large - 2, pick_large + 8)
    ts2 = edata.copy()
    ts2.trim(pick_small - 2, pick_small + 8)
    if fftype == 'multitaper':
        try:
            freq1, spec1 = spectrum_gen(mdata, pick_large, args, debug=debug)
            freq2, spec2 = spectrum_gen(edata, pick_small, args, debug=debug)
            argsn = copy.deepcopy(args)
            #generate snr
            argsn.N = 1
            freqn0, specn0 = spectrum_gen(mdata,
                                          pickn_large,
                                          argsn,
                                          debug=debug)
            freqn, specn = spectrum_gen(edata, pickn_small, argsn, debug=debug)
            freqn0p, specn0p = spectrum_gen(mdata,
                                            picknp_large,
                                            argsn,
                                            debug=debug)
            freqnp, specnp = spectrum_gen(edata,
                                          picknp_small,
                                          argsn,
                                          debug=debug)
            freq = freq1
        except Exception:
            logging.exception('values at exception:')
            return empty_specrat
        if len(freq) < 1 or len(spec1) < 1 or len(spec2) < 1:
            if debug > 0:
                print('we were not able to generate spectrum')
            return empty_specrat
        if len(spec1) != len(spec2) or len(spec2) != len(specn):
            if debug > 0:
                print('length of spectra are not equal! check the picks')
            return empty_specrat
        try:
            #fill very high SNR with some finite value
            esnr = np.divide(spec2, specn)
            msnr = np.divide(spec1, specn0)
            esnr[np.isnan(esnr)] = 100.0
            msnr[np.isnan(msnr)] = 100.0
            snr = np.minimum(esnr, msnr)
            specratio = np.divide(spec1, spec2)
            snrpcoda1 = np.divide(spec1, specn0p)
            snrpcoda2 = np.divide(spec2, specnp)
            snrpcoda1[np.isnan(snrpcoda1)] = 100.0
            snrpcoda2[np.isnan(snrpcoda2)] = 100.0
            snrpcoda = np.minimum(snrpcoda1, snrpcoda2)
        except Exception:
            logging.exception('values at exception:')
            return empty_specrat
    elif fftype == 'decon':
        try:
            freq, specratio, spec1, spec2, decon, msnr, esnr, snrpcoda1, snrpcoda2 = spectrum_gen(
                mdata,
                pick_large,
                args,
                usedecon=True,
                ts_in2=edata,
                pick2=pick_small,
                npick=pickn_large,
                npick2=pickn_small,
                npickp=picknp_large,
                npickp2=picknp_small,
                debug=debug)
        except Exception:
            logging.exception('values at exception')
            return empty_specrat
        # Reorder the deconvolution output (a manual fftshift) so the
        # source time function is centered in the array.
        M = np.arange(0, len(decon))
        N = len(M)
        SeD = np.where(np.logical_and(M >= 0, M < N / 2))
        d1 = decon[SeD]
        SeD2 = np.where(np.logical_and(M > N / 2, M <= N + 1))
        d2 = decon[SeD2]
        stf = np.concatenate((d2, d1))
        stf /= stf.max()
        snr = np.minimum(esnr, msnr)
        snrpcoda = np.minimum(snrpcoda1, snrpcoda2)
    specratio = specrat(freq,
                        specratio,
                        snr,
                        master_name,
                        mla,
                        mlo,
                        egf_name,
                        ela,
                        elo,
                        station,
                        stla,
                        stlo,
                        args.C,
                        mdp=mdp,
                        edp=edp,
                        mastertr=ts1,
                        egftr=ts2,
                        mspec=spec1,
                        espec=spec2,
                        xcorr=coeff,
                        snrPcoda=snrpcoda,
                        msnr=msnr,
                        esnr=esnr,
                        msnrPcoda=snrpcoda1,
                        esnrPcoda=snrpcoda2)
    if fftype == 'decon':
        specratio.stf = stf
    return specratio
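
get_spec_ratio delegates the actual spectra to spectrum_gen (multitaper or deconvolution), which is not shown in this listing. The underlying spectral-ratio idea can be sketched with a plain FFT; everything below is synthetic and purely illustrative:

import numpy as np

# Synthetic master and EGF records: same waveform, amplitude ratio 5.
fs = 100.0                         # sampling rate (Hz), assumed
t = np.arange(0, 10, 1 / fs)
rng = np.random.default_rng(1)
master = np.sin(2 * np.pi * 1.0 * t) + 0.01 * rng.standard_normal(t.size)
egf = 0.2 * np.sin(2 * np.pi * 1.0 * t) + 0.01 * rng.standard_normal(t.size)

# Amplitude spectra and their ratio.
freq = np.fft.rfftfreq(t.size, d=1 / fs)
ratio = np.abs(np.fft.rfft(master)) / np.abs(np.fft.rfft(egf))

# Near the signal frequency the ratio recovers the amplitude ratio of
# the two events (about 5 here).
print(ratio[np.argmin(np.abs(freq - 1.0))])
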
Ejemplo n.º 19
0
def plot(specratios,
         min_snr=0.0,
         min_xcorr=0.0,
         snrtype='Pcoda',
         freqmin=0.5,
         freqmax=5.0,
         savefile=None):
    """
	plots the spectral ratios by station.
	data can be selected by holign the spectra by min_snr
	or by throwing out egf events that do not exceed min_xcorr.
	to calculate xcorr and for plotting purposes the user
	can change the bandwidth by modifying freqmin and freqmax.
	the default is between 0.5 and 5 Hz which is the range
	of corner frequencies for M3-5 earthquakes at about 100 km depth.
	"""
    from mpl_toolkits.basemap import Basemap
    fig = plt.figure(figsize=(10, 7))
    ax1 = plt.subplot2grid((7, 3), (0, 0), rowspan=7)
    ax2 = plt.subplot2grid((7, 3), (0, 1))  #master event trace
    ax3 = plt.subplot2grid((7, 3), (1, 1), rowspan=6)  #egf traces
    #	ax7=plt.subplot2grid((7,3),(5,1), rowspan=2) #stfs.
    ax4 = plt.subplot2grid((7, 3), (0, 2))  #master spectrum
    ax5 = plt.subplot2grid((7, 3), (1, 2), rowspan=3)  #egf spectrum
    ax6 = plt.subplot2grid((7, 3), (4, 2), rowspan=3)  #spetral ratios
    print('len specrats was ' + str(len(specratios)))
    specrats = [specrat for specrat in specratios if specrat.datapercent > 0.4]
    print('len specrats is ' + str(len(specrats)))  #spectral ratios
    if len(specrats) < 1:
        return
    xcorrs = np.asarray([np.max(specrat.xcorr) for specrat in specrats])
    inds = np.argsort(xcorrs)
    new_specrats = []
    for ind in inds:
        new_specrats.append(specrats[ind])
    specrats = new_specrats
    print('len specrats is ' + str(len(specrats)))
    stla = specrats[0].stla
    stlo = specrats[0].stlo
    mla = specrats[0].mla
    mlo = specrats[0].mlo
    mastertr = specrats[0].mastertr
    mastertr.detrend()
    mastertr.detrend('demean')
    mastertr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
    mspec = specrats[0].mspec
    tarray = np.arange(len(mastertr)) * mastertr.stats.delta  #time array
    if len(mspec) < 1:
        print("there's no spectrum?")
        return
    print('loading maps')
    map1 = Basemap(projection='merc',
                   llcrnrlat=mla - 1.25,
                   llcrnrlon=mlo - 1.25,
                   urcrnrlat=mla + 1.25,
                   urcrnrlon=mlo + 1.25,
                   resolution='f',
                   ax=ax1)
    map1.drawmapboundary()
    map1.drawcoastlines()
    x, y = map1(stlo, stla)
    map1.scatter(x, y, marker='v', color='k')
    x1, y1 = map1(mlo, mla)
    map1.scatter(x1, y1, marker='*', color='r')
    ax2.plot(tarray, mastertr.data, color='r')
    ax2.set_xlim((tarray[0], tarray[-1]))
    ax4.loglog(specrats[0].freqs, mspec, basex=10, basey=10, color='r')
    ax4.set_xticklabels([])
    ax2.set_xticklabels([])
    ax2.set_yticklabels([])
    ax2.set_title('traces')
    ax4.set_title('spectra')
    ax6.set_xlabel('spec ratio vs. log frequency')
    #	ax6.set_ylabel('spectral ratio')
    #	ax3.set_xticklabels([])
    ax3.set_xlabel('realigned time (s)')
    ax3.set_yticklabels([])
    plt.suptitle(specrats[0].master + ' at ' + specrats[0].station)
    print('still loading maps')
    print('plotting spectral ratios')
    for ie, specrat in enumerate(specrats):
        if min_snr > 0.0:
            print('holing to min snr ' + str(min_snr))
            specrat.holed(min_snr=min_snr, holeby=snrtype)
        if min_xcorr > 0.0 and specrat.xcorr < min_xcorr:
            print('removing the spectral ratios with xcorr below ' +
                  str(min_xcorr))
            continue
        egftr = specrat.egftr
        egftr.detrend()
        egftr.detrend('demean')
        stf = specrat.stf
        egftr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
        dt, coeff = xcorr_pick_correction(mastertr.stats.starttime + 2,
                                          mastertr,
                                          egftr.stats.starttime + 2,
                                          egftr,
                                          t_before=0.25,
                                          t_after=1.0,
                                          cc_maxlag=1.5)
        ela = specrat.ela
        elo = specrat.elo
        xcorr = np.max(specrat.xcorr)
        #		stf_simple = deconvf(egftr, mastertr, mastertr.stats.sampling_rate)
        print('len stf is ' + str(len(stf)))
        if xcorr > 0.5:
            colour = 'g'
        elif xcorr > 0.3:
            colour = 'b'
        elif xcorr > 0.2:
            colour = 'c'
        else:
            colour = 'm'
        ax5.loglog(specrat.freqs,
                   specrat.espec,
                   basex=10,
                   basey=10,
                   color=colour)
        ax6.loglog(specrat.freqs,
                   specrat.specratio,
                   basex=10,
                   basey=10,
                   color=colour)
        plotdata = egftr.data / max(abs(egftr.data))
        datalen = min(len(plotdata), len(tarray))
        ax3.plot(tarray[0:datalen] + dt,
                 plotdata[0:datalen] + ie,
                 linewidth=0.6,
                 color=colour)
        ax3.text(0, ie, '{:0.2f}'.format(coeff))
        x2, y2 = map1(elo, ela)
        map1.scatter(x2, y2, marker='+', color=colour)
    # try:
    #     if xcorr > 0.2:
    #         ax7.plot(stf, color=colour)
    #         # ax7.plot(stf_simple, color='m')
    # except Exception:
    #     logging.exception('values at exception')
    ax3.set_xlim((tarray[0], tarray[-1]))
    ax6.set_xlim((10**-1.2, 10**1.2))
    ax4.set_xlim((10**-1.2, 10**1.2))
    ax5.set_xlim((10**-1.2, 10**1.2))
    ax5.set_xticklabels([])
    ax3.set_ylim((ie - 12, ie + 1))
    if savefile:
        plt.savefig(savefile)
    else:
        plt.show()
    return
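
The 'realigned time' axis in ax3 comes from adding the cross-correlation shift to the EGF's plotting time vector while leaving the data untouched. A minimal sketch of that trick on synthetic pulses, with dt a made-up stand-in for the xcorr_pick_correction output:

import numpy as np
import matplotlib.pyplot as plt

# Two synthetic pulses, the second one 0.3 s late.
fs = 100.0
t = np.arange(0, 5, 1 / fs)
master = np.exp(-(t - 2.0) ** 2 / 0.01)
egf = np.exp(-(t - 2.3) ** 2 / 0.01)
dt = -0.3  # hypothetical shift from the cross correlation

fig, (ax_raw, ax_aligned) = plt.subplots(2, 1, sharex=True)
ax_raw.plot(t, master)
ax_raw.plot(t, egf)
ax_raw.set_ylabel('raw')
# Shift only the plotting time axis, as plot() does with tarray + dt.
ax_aligned.plot(t, master)
ax_aligned.plot(t + dt, egf)
ax_aligned.set_ylabel('realigned')
ax_aligned.set_xlabel('time (s)')
plt.show()
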
Ejemplo n.º 20
0
            tr1.detrend()
            tr1.filter("bandpass", freqmin=0.3, freqmax=0.9, corners=3)

            print('---> Start')
            print('this is trimmed')
            print(tr1)
            print(tr)

            # try:
            #     dt, coeff = xcorr_pick_correction(t1, tr1, t0, tr, -600, 600, 600, plot=False)
            #     print('made it past xcorr')
            # except Exception as e : print('cross correlation error')
            dt, coeff = xcorr_pick_correction(t1,
                                              tr1,
                                              t0,
                                              tr,
                                              -600,
                                              600,
                                              600,
                                              plot=False)
            if coeff > 0.6:
                print('  Filename:{0}'.format(filename))
                print("  Time correction for pick 2: %.6f" % dt)
                print("  Correlation coefficient: %.2f" % coeff)
                tnew = t0 + dt
                trnew = tr.trim(starttime=tnew - 10, endtime=tnew + 1200)
                tr2 = tr1.trim(starttime=t1 - 10, endtime=t1 + 1197.4)
                times1 = tr2.times()

                print('tr-NEW')
                print(trnew)
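
The commented-out try/except in this fragment hints at the robust pattern it is reaching for: guard the call, gate on the coefficient, then re-trim around the corrected time. A compact, self-contained sketch using ObsPy's bundled demo trace (the picks and windows are arbitrary; the 0.6 gate matches the fragment):

from obspy import read
from obspy.signal.cross_correlation import xcorr_pick_correction

# Placeholder data: the demo trace correlated against a copy of
# itself, purely to exercise the pattern.
tr_ref = read()[0]
tr_new = tr_ref.copy()
t_ref = tr_ref.stats.starttime + 10
t_new = tr_new.stats.starttime + 10

try:
    dt, coeff = xcorr_pick_correction(t_ref, tr_ref, t_new, tr_new,
                                      0.5, 2.0, 1.0, plot=False)
except Exception:
    dt, coeff = None, None

if coeff is not None and coeff > 0.6:
    t_corrected = t_new + dt
    tr_win = tr_new.copy().trim(starttime=t_corrected - 5,
                                endtime=t_corrected + 15)
    print('corrected pick:', t_corrected, 'cc: %.2f' % coeff)
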
Ejemplo n.º 21
0
def pro4statics(eq_file, ref_trace = 'N.SZW',
				dphase = 'PKIKP', dphase2 = 'PKiKP', dphase3 = 'PKIKP', dphase4 = 'PKiKP',
				start_corr_win = -1, end_corr_win = 3, plot_scale_fac = 0.05,
				qual_threshold = 0, corr_threshold = 0,
				max_time_shift = 2, min_dist = 150, max_dist = 164, ARRAY = 0):

	from obspy import UTCDateTime
	from obspy.signal.cross_correlation import xcorr_pick_correction
	from obspy import Stream
	from obspy import Trace
	from obspy import read
	from obspy.geodetics import gps2dist_azimuth
	import numpy as np
	import os
	from obspy.taup import TauPyModel
	import matplotlib.pyplot as plt
	model = TauPyModel(model='iasp91')

	import sys
	import warnings  # warnings are silenced just below

	if not sys.warnoptions:
	    warnings.simplefilter("ignore")
	#%% Get station location file
	if   ARRAY == 0: # Hinet set
		sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/hinet_sta.txt'
	elif ARRAY == 1: # LASA set
		sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/LASA_sta.txt'
	with open(sta_file, 'r') as file:
		lines = file.readlines()
	print('Station file has ' + str(len(lines)) + ' lines.')
	# Load station coords into arrays
		# old line: station_index = range(343)
	station_index = range(len(lines))
	st_lats  = []
	st_lons  = []
	st_deps  = []
	st_names = []
	for ii in station_index:
		line = lines[ii]
		split_line = line.split()
		st_names.append(split_line[0])
		st_lats.append( split_line[1])
		st_lons.append( split_line[2])
		st_deps.append( split_line[3])

	# initialize lists of statics
	sta_names   = []
	sta_dists   = []
	sta_lats    = []
	sta_lons    = []
	sta_statics = []
	sta_corrs   = []

	#%%
	#dphase  = 'PKIKP'       # phase to be aligned
	#dphase2 = 'PKiKP'      # another phase to have traveltime plotted
	#dphase3 = 'PKP'        # another phase to have traveltime plotted
	#dphase4 = 'pP'        # another phase to have traveltime plotted
	#ref_trace = 'N.SZW'   # trace with reference waveform
	#start_corr_win = 2       # plots start Xs before PKiKP
	#end_corr_win   = 7       # plots end Xs before PKiKP
	#max_time_shift = 2       # searches up to this time shift for alignment
	start_plot_win = 0       # plots start Xs before PKiKP
	end_plot_win   = 20       # plots end Xs before PKiKP
	#corr_threshold = 0.  # threshold that correlation is good enough to keep trace
	#max_dist = 151
	#min_dist = 150.6
#	max_dist = 164
#	min_dist = 150
	#plot_scale_fac = 0.2    #  Bigger numbers make each trace amplitude bigger on plot
	#qual_threshold =  0 # minimum SNR
	plot_tt = 1           # plot the traveltimes?
	plot_flag = False     # plot for each trace?  Watch out, can be lots, one for each station pair!!

	#%% Get saved event info, also used to name files
	#  event 2016-05-28T09:47:00.000 -56.241 -26.935 78
	file = open(eq_file, 'r')
	lines=file.readlines()
	split_line = lines[0].split()
#			ids.append(split_line[0])  ignore label, now "event"
	t           = UTCDateTime(split_line[1])
	date_label  = split_line[1][0:10]
	ev_lat      = float(      split_line[2])
	ev_lon      = float(      split_line[3])
	ev_depth    = float(      split_line[4])

	print('Date label ' + date_label + ' lat ' + str(ev_lat) + ' lon ' + str(ev_lon))

	st = Stream()
#	fname     = 'HD' + date_label + '.mseed'
	fname     = 'HD' + date_label + 'sel.mseed'  # sel file has windowing, shift?, filtering

	print('fname ' + fname)

	os.chdir('/Users/vidale/Documents/PyCode/LASA/Pro_Files/')
	os.system('pwd')
	st=read(fname)
	print('Read in: ' + str(len(st)) + ' traces')
	print('First trace has : ' + str(len(st[0].data)) + ' time pts ')

	#%% reference trace, and its starttime, distance, arrival time
	tr_ref = Trace()
	for tr in st: # loop over seismograms to find reference trace
		if (tr.stats.station == ref_trace): # found it
			tr_ref = tr.copy()
			for ii in station_index: # find station in inventory
				this_name = st_names[ii] # disabled convoluted patch for long Hinet names
				this_name_truc = this_name[0:5]
				name_truc_cap  = this_name_truc.upper()
#				print(tr.stats.station + ' tr.stats.station ' +st_names[ii] + ' st_names[ii] ' + ref_trace + ' ref_trace ' + name_truc_cap + ' name_truc_cap ' + this_name + ' this_name ' + this_name_truc + ' this_name_truc')
#				if (tr.stats.station == name_truc_cap):# found it
				if (tr.stats.station == st_names[ii]):# found it
					print(tr.stats.station + ' tr.stats.station ' +st_names[ii] + ' st_names[ii] ')
#					sys.exit()
					stalon = float(st_lons[ii]) # look up lat & lon again to find distance
					stalat = float(st_lats[ii])
					distance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon)
					tr_ref.stats.distance=distance[0]/(1000.*111) # approx distance in degrees
					print('depth ' + str(ev_depth) + ' distance ' + str(tr_ref.stats.distance) + ' phase ' + dphase)
					arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
										=tr_ref.stats.distance,phase_list=[dphase])
					try:
						tr_ref_tt = arrivals[0].time # arrival time
					except:
						print('Station ' + tr.stats.station + ' at distance ' + str(tr_ref.stats.distance) + ' and depth ' + str(ev_depth))
						sys.exit("No arrival time for " + dphase)
#	sys.exit()

	stgood = Stream()
	st2 = st.copy()  # hard to measure timing of traces without adjusting entire thing
	print('st2 has: ' + str(len(st)) + ' traces' + ' t (origin time) ' + str(t))

	#  get station lat-lon, compute distance for plot
	good_corr = 0
	bad_corr = 0
	for tr in st: # do all seismograms
		for ii in station_index: # find station in inventory
			tested_name = st_names[ii]
			actual_trace = tr.stats.station
			if ARRAY == 0: # convoluted patch for long Hinet names
				this_name_truc = tested_name[0:5]
				name_truc_cap  = this_name_truc.upper()
				this_name = name_truc_cap
				actual_trace = tr.stats.station.upper()
			if (actual_trace == tested_name): # found it
				tr_time = tr.stats.starttime  # tr_time apparently not used, a relic
				stalon = float(st_lons[ii]) # look up lat & lon to find distance
				stalat = float(st_lats[ii])
				distance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon)
				tr.stats.distance=distance[0]/(1000.*111) # distance for phase time and plotting
				arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
									=tr.stats.distance,phase_list=[dphase])
				print('made it to here!!')
				try:
					dt, coeff = xcorr_pick_correction(t, tr_ref, t, tr,
							start_corr_win, end_corr_win, max_time_shift, plot=plot_flag)
					print('also made it to here!!')
					if abs(dt) > max_time_shift:
						print('Hey!  Excess shift: %.3f' % dt)
						print('Station ' + tr.stats.station + ' corr is ' + str(coeff))
					if coeff > 1:
						print('Hey!  Excess coeff: %.3f' % coeff)
						print('Station ' + tr.stats.station + ' corr is ' + str(coeff))
					if coeff > corr_threshold:
						good_corr += 1
						if plot_flag:
							print('Time correction for pick 2: %.6f' % dt)
							print('Correlation coefficient: %.2f' % coeff)
						tr.stats.starttime -= dt
						sta_names.extend([tr.stats.station])
						sta_dists.extend([tr.stats.distance])
						sta_lats.extend([stalat])
						sta_lons.extend([stalon])
						sta_statics.extend([dt])
						sta_corrs.extend([coeff])
						stgood += tr
					else:
						bad_corr += 1
				except Exception:
					print('No time shift for ' + tr.stats.station + ' at distance ' + str(tr.stats.distance))

	# TODO: optionally write out station_name, dt, coeff for shifts that
	# pass corr_threshold, and record the shifted waveform in stgood
	print(str(good_corr) + ' out of ' + str(good_corr + bad_corr) + ' are greater than ' + str(corr_threshold))


	#%%
	# plot traces
	plt.close(5)
	plt.figure(5,figsize=(10,10))
	plt.xlim(start_plot_win, end_plot_win)
	plt.ylim(min_dist, max_dist)

	for tr in stgood:
		dist_offset = tr.stats.distance # trying for approx degrees
		time = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t)
		plt.plot(time, (tr.data - np.median(tr.data))*plot_scale_fac /(tr.data.max()
			- tr.data.min()) + dist_offset, color = 'black')

		#%% Plot traveltime curves
	if plot_tt:
		# first traveltime curve
		line_pts = 50
		dist_vec  = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # distance grid
		time_vec1 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
		for i in range(0,line_pts):
			arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
										=dist_vec[i],phase_list=[dphase])
			num_arrivals = len(arrivals)
			found_it = 0
			for j in range(0,num_arrivals):
				if arrivals[j].name == dphase:
					time_vec1[i] = arrivals[j].time
					found_it = 1
			if found_it == 0:
				time_vec1[i] = np.nan
	# second traveltime curve
		if dphase2 != 'no':
			time_vec2 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
			for i in range(0,line_pts):
				arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
											=dist_vec[i],phase_list=[dphase2])
				num_arrivals = len(arrivals)
				found_it = 0
				for j in range(0,num_arrivals):
					if arrivals[j].name == dphase2:
						time_vec2[i] = arrivals[j].time
						found_it = 1
				if found_it == 0:
					time_vec2[i] = np.nan
			plt.plot(time_vec2,dist_vec, color = 'orange')
		# third traveltime curve
		if dphase3 != 'no':
			time_vec3 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
			for i in range(0,line_pts):
				arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
											=dist_vec[i],phase_list=[dphase3])
				num_arrivals = len(arrivals)
				found_it = 0
				for j in range(0,num_arrivals):
					if arrivals[j].name == dphase3:
						time_vec3[i] = arrivals[j].time
						found_it = 1
				if found_it == 0:
					time_vec3[i] = np.nan
			plt.plot(time_vec3,dist_vec, color = 'yellow')
		# fourth traveltime curve
		if dphase4 != 'no':
			time_vec4 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
			for i in range(0,line_pts):
				arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
											=dist_vec[i],phase_list=[dphase4])
				num_arrivals = len(arrivals)
				found_it = 0
				for j in range(0,num_arrivals):
					if arrivals[j].name == dphase4:
						time_vec4[i] = arrivals[j].time
						found_it = 1
				if found_it == 0:
					time_vec4[i] = np.nan
			plt.plot(time_vec4,dist_vec, color = 'purple')

		plt.plot(time_vec1,dist_vec, color = 'blue')
		plt.show()

	plt.xlabel('Time (s)')
	plt.ylabel('Epicentral distance from event (°)')
	plt.title('Post-alignment ' + dphase + ' for ' + fname[2:12])
	plt.show()

	#%%
	# plot traces before time shifts
	plt.close(6)
	plt.figure(6,figsize=(10,10))
	plt.xlim(start_plot_win, end_plot_win)
	plt.ylim(min_dist, max_dist)

	for tr in st2: # regenerate distances into st2 as they were loaded into st for plots
		for ii in station_index: # find station in inventory
			tested_name = st_names[ii]
			actual_trace = tr.stats.station
			if ARRAY == 0: # convoluted patch for long Hinet names
				this_name_truc = tested_name[0:5]
				name_truc_cap  = this_name_truc.upper()
				this_name = name_truc_cap
				actual_trace = tr.stats.station.upper()
			if (actual_trace == tested_name): # found it
				stalon = float(st_lons[ii]) # look up lat & lon to find distance
				stalat = float(st_lats[ii])
				distance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon)
				tr.stats.distance=distance[0]/(1000.*111) # distance for phase time and plotting

	for tr in st2: # generate plot
		dist_offset = tr.stats.distance # trying for approx degrees
		time = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime - t)
		plt.plot(time, (tr.data - np.median(tr.data))*plot_scale_fac /(tr.data.max()
			- tr.data.min()) + dist_offset, color = 'black')

		#%% Plot traveltime curves
	if plot_tt:
		# first traveltime curve
		line_pts = 50
		dist_vec  = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # distance grid
		time_vec1 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
		for i in range(0,line_pts):
			arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
										=dist_vec[i],phase_list=[dphase])
			num_arrivals = len(arrivals)
			found_it = 0
			for j in range(0,num_arrivals):
				if arrivals[j].name == dphase:
					time_vec1[i] = arrivals[j].time
					found_it = 1
			if found_it == 0:
				time_vec1[i] = np.nan
	# second traveltime curve
		if dphase2 != 'no':
			time_vec2 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
			for i in range(0,line_pts):
				arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
											=dist_vec[i],phase_list=[dphase2])
				num_arrivals = len(arrivals)
				found_it = 0
				for j in range(0,num_arrivals):
					if arrivals[j].name == dphase2:
						time_vec2[i] = arrivals[j].time
						found_it = 1
				if found_it == 0:
					time_vec2[i] = np.nan
			plt.plot(time_vec2,dist_vec, color = 'orange')
		# third traveltime curve
		if dphase3 != 'no':
			time_vec3 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
			for i in range(0,line_pts):
				arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
											=dist_vec[i],phase_list=[dphase3])
				num_arrivals = len(arrivals)
				found_it = 0
				for j in range(0,num_arrivals):
					if arrivals[j].name == dphase3:
						time_vec3[i] = arrivals[j].time
						found_it = 1
				if found_it == 0:
					time_vec3[i] = np.nan
			plt.plot(time_vec3,dist_vec, color = 'yellow')
		# fourth traveltime curve
		if dphase4 != 'no':
			time_vec4 = np.arange(min_dist, max_dist, (max_dist - min_dist)/line_pts) # time grid of same length; values overwritten below
			for i in range(0,line_pts):
				arrivals = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree
											=dist_vec[i],phase_list=[dphase4])
				num_arrivals = len(arrivals)
				found_it = 0
				for j in range(0,num_arrivals):
					if arrivals[j].name == dphase4:
						time_vec4[i] = arrivals[j].time
						found_it = 1
				if found_it == 0:
					time_vec4[i] = np.nan
			plt.plot(time_vec4,dist_vec, color = 'purple')

		plt.plot(time_vec1,dist_vec, color = 'blue')
		plt.show()

	plt.xlabel('Time (s)')
	plt.ylabel('Epicentral distance from event (°)')
	plt.title('Pre-alignment ' + dphase + ' for ' + fname[2:12])
	plt.show()

	#  Save aligned traces
	fname_sfile = 'HA' + date_label[:10] + 'pro4_' + dphase + '.mseed'
	fname_stats = 'HA' + date_label[:10] + 'pro4_' + dphase + '.statics'

	fname_sfile = '/Users/vidale/Documents/Github/Array_codes/Files/' + fname_sfile
	fname_stats = '/Users/vidale/Documents/Github/Array_codes/Files/' + fname_stats

	stgood.write(fname_sfile,format = 'MSEED')

	#  Save station static correction files
	#fname_stats = 'Statics' + etime[:10] + dphase + ref_trace + '.txt'
	stats_file = open(fname_stats, 'w')
	len_file1 = len(sta_names)
	for j in range(0,len_file1):
		dist_str = '{:.2f}'.format(  sta_dists[j]) # 2 digits after decimal place
		lat_str  = '{:.4f}'.format(   sta_lats[j]) # 4 digits after decimal place
		lon_str  = '{:.4f}'.format(   sta_lons[j])
		stat_str = '{:.3f}'.format(sta_statics[j])
		corr_str = '{:.3f}'.format(  sta_corrs[j])
		write_line = sta_names[j] +' ' + dist_str +' ' + lat_str +' ' + lon_str +' ' + stat_str + ' ' + corr_str + '\n'
		stats_file.write(write_line)
	stats_file.close()
	file.close()
	print('Correlation files have: ' + str(len_file1) + ' traces')

	os.system('say "Done"')
for tmp in (st1, st2):  # preprocess both streams before correlating
    tmp.filter("bandpass", freqmin=2, freqmax=10)
    tmp.resample(200)

tr1 = st1[0]
tr2 = st2[0]

tr1.plot()
tr2.plot()

#dt, coeff = xcorr_pick_correction(t1, tr1, t2, tr2, 0.05, 0.25, 0.2, plot=True)

#print("No preprocessing:")
#print("  Time correction for pick 2: %.6f" % dt)
#print("  Correlation coefficient: %.2f" % coeff)

dt, coeff = xcorr_pick_correction(t1, tr1, t2, tr2, 0.05, 0.2, 0.1, plot=True)

print("Bandpass prefiltering:")
print("  Time correction for pick 2: %.6f" % dt)
print("  Correlation coefficient: %.2f" % coeff)

from obspy import read, UTCDateTime
from obspy.signal.cross_correlation import xcorr_pick_correction

t1 = UTCDateTime(reference_pick)

for t2 in pick_times:
    t2 = UTCDateTime(t2)

    st1 = read("./data/mtcarmel_100hz.mseed", starttime=t1-2, endtime=t1+5)
        syn_tr.data = np.require(syn_tr.data, requirements=["C"])

        # Interpolate both to the same sample points.
        starttime = max(obs_tr.stats.starttime, syn_tr.stats.starttime)
        endtime = min(obs_tr.stats.endtime, syn_tr.stats.endtime)
        npts = int((endtime - starttime) // (1.0 / 40.0) - 1)
        obs_tr.interpolate(sampling_rate=40.0, method="lanczos", a=5,
                           starttime=starttime, npts=npts)
        syn_tr.interpolate(sampling_rate=40.0, method="lanczos", a=5,
                           starttime=starttime, npts=npts)

        print(event.id, network, station)

        # Subsample precision.
        try:
            shift, corr = xcorr_pick_correction(pick, obs_tr, pick, syn_tr, t_before=20,
                                                t_after=20, cc_maxlag=12)
        except Exception:
            shift, corr = None, None
        collected_stats.append({"time_shift": shift, "correlation": corr,
                                "network": network, "station": station, "event": event.id})

        if "processed_observed_stream" not in this_data:
            this_data["processed_observed_stream"] = obspy.Stream()

        this_data["processed_observed_stream"] += obs_tr

        if "processed_synthetic_stream" not in this_data:
            this_data["processed_synthetic_stream"] = obspy.Stream()

        this_data["processed_synthetic_stream"] += syn_tr