Example #1
   def __init__(self,tr_e,tr_n,tr_z,**kwargs):
   ####################################################################################
      '''
      Initialize receiver function

      params:
      tr_e  :obspy trace object, BHE channel
      tr_n  :obspy trace object, BHN channel
      tr_z  :obspy trace object, BHZ channel

      **kwargs:
      taup_model: TauPyModel instance.  Passing a pre-existing model speeds things up
                  since it isn't necessary to initialize the model when creating
                  the receiver function object.

      taup_model_name: If taup_model = 'none', you can tell it which model to use. The
                       default is 'ak135' (prem_5km is used for migrations).

      window:  A tuple describing the time window of the receiver function (times given
               relative to P).
      '''
      #inherit taup model to avoid initializing the model for every receiver function
      taup_model = kwargs.get('taup_model','none')
      taup_model_name = kwargs.get('taup_model_name','ak135') #prem_5km if doing migrations

      if taup_model == 'none':
         self.model = TauPyModel(model=taup_model_name)
      else:
         self.model = taup_model

      #cut window centered on P phase
      self.window  = kwargs.get('window',[-10,150])
      self.tr_e    = phase_window(tr_e,phases=['P'],window_tuple=self.window,taup_model=self.model)
      self.tr_n    = phase_window(tr_n,phases=['P'],window_tuple=self.window,taup_model=self.model)
      self.tr_z    = phase_window(tr_z,phases=['P'],window_tuple=self.window,taup_model=self.model)
      self.time    = np.linspace(self.window[0],self.window[1],len(self.tr_e.data))
      self.dt      = 1.0/self.tr_e.stats.sampling_rate

      #make start time zero NOT SURE IF THIS WORKS!!! RM/ 4/13/16
      self.tr_e.starttime = 0
      self.tr_n.starttime = 0
      self.tr_z.starttime = 0

      #initialize obspy stream
      self.rf_st   = obspy.Stream(self.tr_e)
      self.rf_st  += self.tr_n
      self.rf_st  += self.tr_z

      self.gcarc   = self.tr_e.stats.sac['gcarc']
      self.evdp    = self.tr_e.stats.sac['evdp']
      self.pierce  = []

      #read slowness table for moveout correction
      self.slowness_table = np.loadtxt('/geo/work10/romaguir/seismology/seis_tools/seispy/slowness_table.dat')

      #get slowness and predicted P410s, P660s arrival times
      tt = self.model.get_travel_times(source_depth_in_km = self.evdp,
                                       distance_in_degree = self.gcarc,
                                       phase_list=['P','P410s','P660s'])

      #just in case there's more than one phase arrival, loop through tt list
      for i in range(0,len(tt)):
         if tt[i].name == 'P':
            self.predicted_p_arr = tt[i].time 
            #ray parameter (horizontal slowness) of incident plane wave 
            self.ray_param = tt[i].ray_param_sec_degree
         elif tt[i].name == 'P410s':
            self.predicted_p410s_arr = tt[i].time 
         if tt[i].name == 'P660s':
            self.predicted_p660s_arr = tt[i].time 

      #event information
      self.evla  = self.tr_e.stats.sac['evla']
      self.evlo  = self.tr_e.stats.sac['evlo']
      self.stla  = self.tr_e.stats.sac['stla']
      self.stlo  = self.tr_e.stats.sac['stlo']
      self.gcarc = self.tr_e.stats.sac['gcarc']
      self.evdp  = self.tr_e.stats.sac['evdp']
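A hedged usage sketch of the constructor above: the class name (ReceiverFunction) and the waveform path are assumptions for illustration; only the keyword arguments mirror the code shown.

import obspy
from obspy.taup import TauPyModel

shared_model = TauPyModel(model='ak135')    # build the TauP model once, reuse for every event
st = obspy.read('event_waveforms/*.SAC')    # assumed data location
rf = ReceiverFunction(st.select(channel='BHE')[0],
                      st.select(channel='BHN')[0],
                      st.select(channel='BHZ')[0],
                      taup_model=shared_model,
                      window=(-10, 150))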
Example #2
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10, 6)

from obspy.taup.taup import getTravelTimes
from obspy.core.util.geodetics import gps2DistAzimuth
from obspy.taup import TauPyModel

from seismon.eqmon import ampRf, shoot

degrees = np.linspace(1,180,180)
distances = degrees*(np.pi/180)*6370000
depths = np.linspace(1,100,100)

model = TauPyModel(model="iasp91")
#model = TauPyModel(model="1066a")

fwd = 0
back = 0

eqlat, eqlon = 35.6895, 139.6917

GPS = 0
magnitude = 6.0
depth = 20.0
Rf0 = 76.44
Rfs = 1.37
cd = 440.68
rs = 1.57
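A hedged sketch (not part of the original script) showing one way the model and the degrees/depths grids defined above can be combined to tabulate first P-arrival times:

p_times = np.full((len(depths), len(degrees)), np.nan)
for i, depth_km in enumerate(depths):
    for j, deg in enumerate(degrees):
        arrs = model.get_travel_times(source_depth_in_km=depth_km,
                                      distance_in_degree=deg,
                                      phase_list=['P'])
        if arrs:
            p_times[i, j] = arrs[0].time   # first P arrival, in seconds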
Example #3
filter_name = []
filter_freq = []
filter_length = []

for ifilter in range(0, args.nfilter):
    filter_name.append('band%02d' % ifilter)
    filter_freq.append(args.f0 * 2.0**(ifilter * 0.5))
    filter_length.append(args.f0 * 2.0**(ifilter * 0.5) * 1.5)

with open('filters.dat', 'w') as fid:
    fid.write('%d   Number of filters\n' % args.nfilter)
    for ifilter in range(0, args.nfilter):
        fid.write('%s %s %4.1f 0.5 %4.1f 0\n' %
                  (filter_name[ifilter], 'Gabor', filter_freq[ifilter],
                   filter_freq[ifilter]))
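# Added note (not in the original script): the loop above spaces the filter
# center frequencies in half octaves, f_i = args.f0 * 2**(i/2), so each step
# multiplies the frequency by sqrt(2) ~ 1.41; e.g. f0 = 1/32 Hz would give
# roughly 0.031, 0.044, 0.063, 0.088, ... Hz.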

# Version to loop over stations in inventory
model = TauPyModel(model='ak135')

# Length of wavefield database
t_max = 1800.

if args.receivers is None:
    stats_sort = []
    dist_min = 20.0
    dist_max = 100.0
    dist_step = 20.0

    dists = np.arange(dist_min, dist_max + dist_step, dist_step)

    stats = []
    for irec in range(0, len(dists)):
        stat = obspy.core.inventory.Station(latitude=90.0 - dists[irec],
Example #4
#!/usr/bin/env python

import scipy.stats
import obspy
from obspy.taup import TauPyModel
import numpy as np
model = TauPyModel(model="prem")

'''
Samuel Haugland 01/19/16

seis_filter.py includes functions needed to remove unwanted traces from
streams based on various criteria. All functions should take a stream object
and arguments and return the filtered stream object
'''

def kurtosis_filter(st, **kwargs):
    '''
    remove traces from phase based on kurtosis
    '''

    alpha = kwargs.get('alpha', False)
    if alpha is False:
        alpha = 0.5

    k = []
    for tr in st:
        ki = scipy.stats.kurtosis(tr.data)
        if np.isnan(ki):
            st.remove(tr)
            continue
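Example #5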
from matplotlib.ticker import FormatStrFormatter
from matplotlib.offsetbox import OffsetImage, AnnotationBbox


def nospaces(string):
    out = ""
    for l in string.upper():
        if l in "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789":
            out += l
        else:
            out += "_"
    return out


# The velocity model to use for calculating phase arrival times see: https://docs.obspy.org/packages/obspy.taup.html
model = TauPyModel(model='iasp91')
# List of phases to display. Uncomment the line you want, or create a new list
#PHASES = sorted(["P", "pP", "pPP", "PP", "PPP", "S", "Pdiff", "PKP", "PKIKP", "PcP", "ScP", "ScS", "PKiKP", "SKiKP", "SKP", "SKS"]) # list of phases for which to compute theoretical times
PHASES = sorted(["PKP", "PKIKP", "PKiKP", "SKiKP", "SKP"
                 ])  # list of phases for which to compute theoretical times
#PHASES = sorted(["Pg","Pn","PnPn","PgPg","PmP","PmS","Sg","Sn","SnSn","SgSg","SmS","SmP"])
# Provider of waveform data
DATA_PROVIDER = "RASPISHAKE"

# Event details
URL = 'https://earthquake.usgs.gov/earthquakes/eventpage/us6000e0iy/executive'
EQNAME = 'M6 44 km SSW of Gongdanglegi Kulon, Indonesia'
EQLAT = -8.5622
EQLON = 112.519
EQZ = 82.28
EQTIME = '2021-04-10 07:00:17'
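A hedged sketch of how the model, PHASES list and event depth defined above are typically combined; the epicentral distance used here is an assumption for illustration:

dist_deg = 130.0   # assumed station distance in degrees
arrivals = model.get_travel_times(source_depth_in_km=EQZ,
                                  distance_in_degree=dist_deg,
                                  phase_list=PHASES)
for arr in arrivals:
    print(arr.name, round(arr.time, 1))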
Example #6
STATION_FILENAME = "station_list.txt"
conn, cursor = parse_station_file(STATION_FILENAME)

EVENT_FILENAME = "event_db.json"
create_event_json_file(EVENT_FILENAME)


def get_event(event_id):
    return get_event_information(event_id=event_id, filename=EVENT_FILENAME)


def get_station_coordinates(networks, stations):
    return get_coordinates(cursor, networks=networks, stations=stations)


tau_model = TauPyModel(model="ak135")


def get_travel_time(sourcelatitude, sourcelongitude, sourcedepthinmeters,
                    receiverlatitude, receiverlongitude, receiverdepthinmeters,
                    phase_name, db_info):
    if receiverdepthinmeters:
        raise ValueError("This travel time implementation cannot calculate "
                         "buried receivers.")

    great_circle_distance = geodetics.locations2degrees(
        sourcelatitude, sourcelongitude, receiverlatitude, receiverlongitude)

    try:
        tts = tau_model.get_travel_times(
            source_depth_in_km=sourcedepthinmeters / 1000.0,
Example #7
def checkComCat(rtable, ftable, cnum, f, startTime, windowStart, opt):
    """
    Checks repeater trigger times with projected arrival times from ANSS Comprehensive
    Earthquake Catalog (ComCat) and writes these to HTML and image files. Will also
    check NCEDC catalog if location is near Northern California.
    
    rtable: Repeater table
    ftable: Families table
    cnum: cluster number to check
    f: HTML file to write to
    startTime: startTime column from rtable (convenience)
    windowStart: windowStart column from rtable (convenience)
    opt: Options object describing station/run parameters
    
    Traces through iasp91 global velocity model; checks for local, regional, and
    teleseismic matches for limited set of phase arrivals
    """

    pc = ['Potential', 'Conflicting']
    model = TauPyModel(model="iasp91")
    mc = 0
    n = 0
    l = 0
    stalats = np.array(opt.stalats.split(',')).astype(float)
    stalons = np.array(opt.stalons.split(',')).astype(float)
    latc = np.mean(stalats)
    lonc = np.mean(stalons)

    members = np.fromstring(ftable[cnum]['members'], dtype=int, sep=' ')
    order = np.argsort(startTime[members])
    f.write('</br><b>ComCat matches:</b></br>')

    for m in members[order]:
        t = UTCDateTime(startTime[m]) + windowStart[m] / opt.samprate
        cc_url = ('http://earthquake.usgs.gov/fdsnws/event/1/query?'
                  'starttime={}&endtime={}&format=text').format(
                      t - 1800, t + 30)
        try:
            comcat = pd.read_csv(cc_url, delimiter='|')
            otime = comcat['Time'].tolist()
            lat = comcat['Latitude'].tolist()
            lon = comcat['Longitude'].tolist()
            dep = comcat['Depth/km'].tolist()
            mag = comcat['Magnitude'].tolist()
            place = comcat['EventLocationName'].tolist()
        except urllib2.HTTPError:
            otime = []
            lat = []
            lon = []
            dep = []
            mag = []
            place = []

        # Check if near Northern California, then go to NCEDC for additional events but
        # for shorter time interval
        if latc > 34 and latc < 42 and lonc > -124 and lonc < -116:
            cc_urlnc = ('http://ncedc.org/fdsnws/event/1/query?'
                        'starttime={}&endtime={}&format=text').format(
                            (t - 60).isoformat(), (t + 30).isoformat())
            try:
                ncedc = pd.read_csv(cc_urlnc, delimiter='|')
                otime.extend(ncedc[' Time '].tolist())
                lat.extend(ncedc[' Latitude '].tolist())
                lon.extend(ncedc[' Longitude '].tolist())
                dep.extend(ncedc[' Depth/km '].tolist())
                mag.extend(ncedc[' Magnitude '].tolist())
                place.extend(ncedc[' EventLocationName'].tolist())
            except ValueError:
                pass

        n0 = 0
        for c in range(len(otime)):
            deg = locations2degrees(lat[c], lon[c], latc, lonc)
            dt = t - UTCDateTime(otime[c])

            if deg <= opt.locdeg:
                mc += 1
                if np.remainder(mc, 100) == 0:
                    model = TauPyModel(model="iasp91")
                arrivals = model.get_travel_times(
                    source_depth_in_km=max(0, dep[c]),
                    distance_in_degree=deg,
                    phase_list=['p', 's', 'P', 'S'])
                if len(arrivals) > 0:
                    pt = np.zeros((len(arrivals), ))
                    pname = []
                    for a in range(len(arrivals)):
                        pt[a] = arrivals[a].time - dt
                        pname.append(arrivals[a].name)
                    if np.min(abs(pt)) < opt.serr:
                        amin = np.argmin(abs(pt))
                        f.write(
                            ('{} local match: {} ({}, {}) {}km M{} {} - ({}) '
                             '{:4.2f} s</br>').format(pc[n0], otime[c], lat[c],
                                                      lon[c], dep[c], mag[c],
                                                      place[c], pname[amin],
                                                      pt[amin]))
                        n0 = 1
                        l = l + 1
                        if l == 1:
                            llats = np.array(lat[c])
                            llons = np.array(lon[c])
                            ldeps = np.array(dep[c])
                        else:
                            llats = np.append(llats, lat[c])
                            llons = np.append(llons, lon[c])
                            ldeps = np.append(ldeps, dep[c])
            elif deg <= opt.regdeg and mag[c] >= opt.regmag:
                mc += 1
                if np.remainder(mc, 100) == 0:
                    model = TauPyModel(model="iasp91")
                arrivals = model.get_travel_times(
                    source_depth_in_km=max(0, dep[c]),
                    distance_in_degree=deg,
                    phase_list=['p', 's', 'P', 'S', 'PP', 'SS'])
                if len(arrivals) > 0:
                    pt = np.zeros((len(arrivals), ))
                    pname = []
                    for a in range(len(arrivals)):
                        pt[a] = arrivals[a].time - dt
                        pname.append(arrivals[a].name)
                    if np.min(abs(pt)) < opt.serr:
                        amin = np.argmin(abs(pt))
                        f.write((
                            '{} regional match: {} ({}, {}) {}km M{} {} - ({}) '
                            '{:4.2f} s</br>').format(pc[n0], otime[c], lat[c],
                                                     lon[c], dep[c], mag[c],
                                                     place[c], pname[amin],
                                                     pt[amin]))
                        n0 = 1
            elif deg > opt.regdeg and mag[c] >= opt.telemag:
                mc += 1
                if np.remainder(mc, 100) == 0:
                    model = TauPyModel(model="iasp91")
                arrivals = model.get_travel_times(
                    source_depth_in_km=max(0, dep[c]),
                    distance_in_degree=deg,
                    phase_list=[
                        'P', 'S', 'PP', 'SS', 'PcP', 'ScS', 'PKiKP', 'PKIKP'
                    ])
                if len(arrivals) > 0:
                    pt = np.zeros((len(arrivals), ))
                    pname = []
                    for a in range(len(arrivals)):
                        pt[a] = arrivals[a].time - dt
                        pname.append(arrivals[a].name)
                    if np.min(abs(pt)) < opt.serr:
                        amin = np.argmin(abs(pt))
                        f.write((
                            '{} teleseismic match: {} ({}, {}) {}km M{} {} - ({}) '
                            '{:4.2f} s</br>').format(pc[n0], otime[c], lat[c],
                                                     lon[c], dep[c], mag[c],
                                                     place[c], pname[amin],
                                                     pt[amin]))
                        n0 = 1
        if n0 > 1:
            n = n + 1
        else:
            n = n + n0
    if n > 0:
        f.write('Total potential matches: {}</br>'.format(n))
        f.write('Potential local matches: {}</br>'.format(l))
        if l > 0:
            m = Basemap(llcrnrlon=lonc - 2 * opt.locdeg,
                        llcrnrlat=latc - opt.locdeg,
                        urcrnrlon=lonc + 2 * opt.locdeg,
                        urcrnrlat=latc + opt.locdeg,
                        resolution='l',
                        projection='tmerc',
                        lon_0=lonc,
                        lat_0=latc)
            m.scatter(llons,
                      llats,
                      s=5,
                      alpha=0.5,
                      marker='o',
                      color='r',
                      latlon=True)
            m.scatter(stalons,
                      stalats,
                      marker='^',
                      color='k',
                      facecolors='None',
                      latlon=True)
            m.drawparallels(np.arange(np.floor(latc - opt.locdeg),
                                      np.ceil(latc + opt.locdeg),
                                      opt.locdeg / 2),
                            labels=[1, 0, 0, 0])
            m.drawmeridians(np.arange(np.floor(lonc - 2 * opt.locdeg),
                                      np.ceil(lonc + 2 * opt.locdeg),
                                      opt.locdeg),
                            labels=[0, 0, 0, 1])
            m.drawmapscale(lonc - opt.locdeg - 0.1,
                           latc - opt.locdeg + 0.1,
                           lonc,
                           latc,
                           length=50,
                           barstyle='fancy')
            plt.title('{} potential local matches (~{:3.1f} km depth)'.format(
                l, np.mean(ldeps)))
            plt.savefig('./{}/clusters/map{}.png'.format(opt.groupName, cnum),
                        dpi=100)
            plt.close()
            f.write('<img src="map{}.png"></br>'.format(cnum))
    else:
        f.write('No matches found</br>')
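The local, regional and teleseismic branches above all share the same acceptance test: predict arrivals for the candidate catalog event and call it a match when the closest predicted time is within opt.serr of the observed trigger offset. A stripped-down sketch of that test (the numbers are illustrative placeholders, not values from the original code):

import numpy as np
from obspy.taup import TauPyModel

model = TauPyModel(model="iasp91")
dt_obs = 12.3   # trigger time minus catalog origin time, seconds (placeholder)
serr = 5.0      # acceptance tolerance, seconds (stands in for opt.serr)
arrivals = model.get_travel_times(source_depth_in_km=10.0,
                                  distance_in_degree=0.5,
                                  phase_list=['p', 's', 'P', 'S'])
if len(arrivals) > 0:
    pt = np.array([a.time - dt_obs for a in arrivals])
    amin = np.argmin(np.abs(pt))
    if np.abs(pt[amin]) < serr:
        print('match:', arrivals[amin].name, pt[amin])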
Example #8
def run_parallel_generate_ruptures(
        home, project_name, run_name, fault_name, slab_name, mesh_name,
        load_distances, distances_name, UTM_zone, tMw, model_name, hurst, Ldip,
        Lstrike, num_modes, Nrealizations, rake, rise_time_depths0,
        rise_time_depths1, time_epi, max_slip, source_time_function, lognormal,
        slip_standard_deviation, scaling_law, ncpus, force_magnitude,
        force_area, mean_slip_name, hypocenter, slip_tol, force_hypocenter,
        no_random, shypo, use_hypo_fraction, shear_wave_fraction,
        max_slip_rule, rank, size):
    '''
    Depending on user selected flags parse the work out to different functions
    '''

    from numpy import load, save, genfromtxt, log10, cos, sin, deg2rad, savetxt, zeros, where
    from time import gmtime, strftime
    from numpy.random import shuffle
    from mudpy import fakequakes
    from obspy import UTCDateTime
    from obspy.taup import TauPyModel
    import warnings

    #I don't condone it but this cleans up the warnings
    warnings.filterwarnings("ignore")
    print("hello?")
    # Fix input formats
    rank = int(rank)
    size = int(size)
    if time_epi == 'None':
        time_epi = None
    else:
        time_epi = UTCDateTime(time_epi)
    rise_time_depths = [rise_time_depths0, rise_time_depths1]
    #hypocenter=[hypocenter_lon,hypocenter_lat,hypocenter_dep]
    tMw = tMw.split(',')
    target_Mw = zeros(len(tMw))
    for rMw in range(len(tMw)):
        target_Mw[rMw] = float(tMw[rMw])

    print("HELLO?")

    #Should I calculate or load the distances?
    if load_distances == 1:
        Dstrike = load(home + project_name + '/data/distances/' +
                       distances_name + '.strike.npy')
        Ddip = load(home + project_name + '/data/distances/' + distances_name +
                    '.dip.npy')
    else:
        Dstrike, Ddip = fakequakes.subfault_distances_3D(
            home, project_name, fault_name, slab_name, UTM_zone)
        save(
            home + project_name + '/data/distances/' + distances_name +
            '.strike.npy', Dstrike)
        save(
            home + project_name + '/data/distances/' + distances_name +
            '.dip.npy', Ddip)

    #Read fault and prepare output variable
    whole_fault = genfromtxt(home + project_name + '/data/model_info/' +
                             fault_name)

    #Get structure model
    vel_mod_file = home + project_name + '/structure/' + model_name

    #Get TauPyModel
    velmod = TauPyModel(model=home + project_name + '/structure/' +
                        model_name.split('.')[0])

    #Now loop over the number of realizations
    realization = 0
    if rank == 0:
        print('Generating rupture scenarios')
    for kmag in range(len(target_Mw)):
        if rank == 0:
            print('... Calculating ruptures for target magnitude Mw = ' +
                  str(target_Mw[kmag]))
        for kfault in range(Nrealizations):
            if kfault % 1 == 0 and rank == 0:
                print('... ... working on ruptures ' +
                      str(ncpus * realization) + ' to ' +
                      str(ncpus * (realization + 1) - 1) + ' of ' +
                      str(Nrealizations * size * len(target_Mw)))
                #print '... ... working on ruptures '+str(ncpus*realization+rank)+' of '+str(Nrealizations*size-1)

            #Prepare output
            fault_out = zeros((len(whole_fault), 14))
            fault_out[:, 0:8] = whole_fault[:, 0:8]
            fault_out[:, 10:12] = whole_fault[:, 8:]

            #Success criterion
            success = False
            while success == False:
                #Select only a subset of the faults based on magnitude scaling
                current_target_Mw = target_Mw[kmag]
                ifaults, hypo_fault, Lmax, Wmax, Leff, Weff = fakequakes.select_faults(
                    whole_fault,
                    Dstrike,
                    Ddip,
                    current_target_Mw,
                    num_modes,
                    scaling_law,
                    force_area,
                    no_shallow_epi=False,
                    no_random=no_random,
                    subfault_hypocenter=shypo,
                    use_hypo_fraction=use_hypo_fraction)
                fault_array = whole_fault[ifaults, :]
                Dstrike_selected = Dstrike[ifaults, :][:, ifaults]
                Ddip_selected = Ddip[ifaults, :][:, ifaults]

                #Determine correlation lengths from effective length/width Leff and Weff
                if Lstrike == 'MB2002':  #Use scaling
                    #Ls=10**(-2.43+0.49*target_Mw)
                    Ls = 2.0 + (1. / 3) * Leff
                elif Lstrike == 'auto':
                    Ls = 17.7 + 0.34 * Leff
                else:
                    Ls = Lstrike
                if Ldip == 'MB2002':  #Use scaling
                    #Ld=10**(-1.79+0.38*target_Mw)
                    Ld = 1.0 + (1. / 3) * Weff
                elif Ldip == 'auto':
                    Ld = 6.8 + 0.4 * Weff
                else:
                    Ld = Ldip

                #Get the mean uniform slip for the target magnitude
                if mean_slip_name == None:
                    mean_slip, mu = fakequakes.get_mean_slip(
                        target_Mw[kmag], fault_array, vel_mod_file)
                else:
                    foo, mu = fakequakes.get_mean_slip(target_Mw[kmag],
                                                       fault_array,
                                                       vel_mod_file)
                    mean_fault = genfromtxt(mean_slip_name)
                    mean_slip = (mean_fault[:, 8]**2 +
                                 mean_fault[:, 9]**2)**0.5

                    #keep only faults that have mean slip inside the fault_array selected faults
                    mean_slip = mean_slip[ifaults]

                    #get the area in those selected faults
                    area = fault_array[:, -2] * fault_array[:, -1]

                    #get the moment in those selected faults
                    moment_on_selected = (area * mu * mean_slip).sum()

                    #target moment
                    target_moment = 10**(1.5 * target_Mw[kmag] + 9.1)

                    #How much do I need to upscale?
                    scale_factor = target_moment / moment_on_selected

                    #rescale the slip
                    mean_slip = mean_slip * scale_factor

                    #Make sure mean_slip has no zero slip faults
                    izero = where(mean_slip == 0)[0]
                    mean_slip[izero] = slip_tol

                #Get correlation matrix
                C = fakequakes.vonKarman_correlation(Dstrike_selected,
                                                     Ddip_selected, Ls, Ld,
                                                     hurst)

                # Lognormal or not?
                if lognormal == False:
                    #Get covariance matrix
                    C_nonlog = fakequakes.get_covariance(
                        mean_slip, C, target_Mw[kmag], fault_array,
                        vel_mod_file, slip_standard_deviation)
                    #Get eigen values and eigenvectors
                    eigenvals, V = fakequakes.get_eigen(C_nonlog)
                    #Generate fake slip pattern
                    rejected = True
                    while rejected == True:
                        #                        slip_unrectified,success=make_KL_slip(fault_array,num_modes,eigenvals,V,mean_slip,max_slip,lognormal=False,seed=kfault)
                        slip_unrectified, success = fakequakes.make_KL_slip(
                            fault_array,
                            num_modes,
                            eigenvals,
                            V,
                            mean_slip,
                            max_slip,
                            lognormal=False,
                            seed=None)
                        slip, rejected, percent_negative = fakequakes.rectify_slip(
                            slip_unrectified, percent_reject=13)
                        if rejected == True:
                            print(
                                '... ... ... negative slip threshold exceeded with %d%% negative slip. Recomputing...'
                                % (percent_negative))
                else:
                    #Get lognormal values
                    C_log, mean_slip_log = fakequakes.get_lognormal(
                        mean_slip, C, target_Mw[kmag], fault_array,
                        vel_mod_file, slip_standard_deviation)
                    #Get eigen values and eigenvectors
                    eigenvals, V = fakequakes.get_eigen(C_log)
                    #Generate fake slip pattern
                    #                    slip,success=make_KL_slip(fault_array,num_modes,eigenvals,V,mean_slip_log,max_slip,lognormal=True,seed=kfault)
                    slip, success = fakequakes.make_KL_slip(fault_array,
                                                            num_modes,
                                                            eigenvals,
                                                            V,
                                                            mean_slip_log,
                                                            max_slip,
                                                            lognormal=True,
                                                            seed=None)

                #Slip pattern successfully made, moving on.
                #Rigidities
                foo, mu = fakequakes.get_mean_slip(target_Mw[kmag],
                                                   whole_fault, vel_mod_file)
                fault_out[:, 13] = mu

                #Calculate moment and magnitude of fake slip pattern
                M0 = sum(slip * fault_out[ifaults, 10] *
                         fault_out[ifaults, 11] * mu[ifaults])
                Mw = (2. / 3) * (log10(M0) - 9.1)

                #Check max_slip_rule
                if max_slip_rule == True:

                    max_slip_from_rule = 10**(-4.94 + 0.71 * Mw
                                              )  #From Allen & Hayes, 2017
                    max_slip_tolerance = 3

                    if slip.max() > max_slip_tolerance * max_slip_from_rule:
                        success = False
                        print(
                            '... ... ... max slip condition violated max_slip_rule, recalculating...'
                        )

                #Force to target magnitude
                if force_magnitude == True:
                    M0_target = 10**(1.5 * target_Mw[kmag] + 9.1)
                    M0_ratio = M0_target / M0
                    #Multiply slip by ratio
                    slip = slip * M0_ratio
                    #Recalculate
                    M0 = sum(slip * fault_out[ifaults, 10] *
                             fault_out[ifaults, 11] * mu[ifaults])
                    Mw = (2. / 3) * (log10(M0) - 9.1)

                #check max_slip again
                if slip.max() > max_slip:
                    success = False
                    print(
                        '... ... ... max slip condition violated due to force_magnitude=True, recalculating...'
                    )

            #Get stochastic rake vector
            stoc_rake = fakequakes.get_stochastic_rake(rake, len(slip))

            #Place slip values in output variable
            fault_out[ifaults, 8] = slip * cos(deg2rad(stoc_rake))
            fault_out[ifaults, 9] = slip * sin(deg2rad(stoc_rake))

            #Move hypocenter to somewhere with a substantial fraction of peak slip
            #            slip_fraction=0.25
            #            islip=where(slip>slip.max()*slip_fraction)[0]
            #            shuffle(islip) #randomize
            #            hypo_fault=ifaults[islip[0]] #select first from randomized vector

            #Calculate and scale rise times
            rise_times = fakequakes.get_rise_times(M0, slip, fault_array,
                                                   rise_time_depths, stoc_rake)

            #Place rise_times in output variable
            fault_out[:, 7] = 0
            fault_out[ifaults, 7] = rise_times

            #Calculate rupture onset times
            if force_hypocenter == False:  #Use random hypo, otherwise force hypo to user specified
                hypocenter = whole_fault[hypo_fault, 1:4]

            t_onset = fakequakes.get_rupture_onset(home, project_name, slip,
                                                   fault_array, model_name,
                                                   hypocenter,
                                                   rise_time_depths, M0,
                                                   velmod)
            fault_out[:, 12] = 0
            fault_out[ifaults, 12] = t_onset

            #Calculate location of moment centroid
            centroid_lon, centroid_lat, centroid_z = fakequakes.get_centroid(
                fault_out)

            #Write to file
            run_number = str(ncpus * realization + rank).rjust(6, '0')
            outfile = home + project_name + '/output/ruptures/' + run_name + '.' + run_number + '.rupt'
            savetxt(
                outfile,
                fault_out,
                fmt=
                '%d\t%10.6f\t%10.6f\t%8.4f\t%7.2f\t%7.2f\t%4.1f\t%5.2f\t%5.2f\t%5.2f\t%10.2f\t%10.2f\t%5.2f\t%.6e',
                header=
                'No,lon,lat,z(km),strike,dip,rise,dura,ss-slip(m),ds-slip(m),ss_len(m),ds_len(m),rupt_time(s),rigidity(Pa)'
            )

            #Write log file
            logfile = home + project_name + '/output/ruptures/' + run_name + '.' + run_number + '.log'
            f = open(logfile, 'w')
            f.write('Scenario calculated at ' +
                    strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ' GMT\n')
            f.write('Project name: ' + project_name + '\n')
            f.write('Run name: ' + run_name + '\n')
            f.write('Run number: ' + run_number + '\n')
            f.write('Velocity model: ' + model_name + '\n')
            f.write('No. of KL modes: ' + str(num_modes) + '\n')
            f.write('Hurst exponent: ' + str(hurst) + '\n')
            f.write('Corr. length used Lstrike: %.2f km\n' % Ls)
            f.write('Corr. length used Ldip: %.2f km\n' % Ld)
            f.write('Slip std. dev.: %.3f km\n' % slip_standard_deviation)
            f.write('Maximum length Lmax: %.2f km\n' % Lmax)
            f.write('Maximum width Wmax: %.2f km\n' % Wmax)
            f.write('Effective length Leff: %.2f km\n' % Leff)
            f.write('Effective width Weff: %.2f km\n' % Weff)
            f.write('Target magnitude: Mw %.4f\n' % target_Mw[kmag])
            f.write('Actual magnitude: Mw %.4f\n' % Mw)
            f.write('Hypocenter (lon,lat,z[km]): (%.6f,%.6f,%.2f)\n' %
                    (hypocenter[0], hypocenter[1], hypocenter[2]))
            f.write('Hypocenter time: %s\n' % time_epi)
            f.write('Centroid (lon,lat,z[km]): (%.6f,%.6f,%.2f)\n' %
                    (centroid_lon, centroid_lat, centroid_z))
            f.write('Source time function type: %s' % source_time_function)
            f.close()

            realization += 1
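The rupture generator above repeatedly converts between seismic moment and moment magnitude using M0 = 10**(1.5*Mw + 9.1) and Mw = (2/3)*(log10(M0) - 9.1); a tiny self-consistency check with an illustrative magnitude:

from numpy import log10
target_Mw = 8.0
M0 = 10**(1.5 * target_Mw + 9.1)          # scalar moment in N*m
Mw_back = (2. / 3) * (log10(M0) - 9.1)    # recovers 8.0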
Example #9
    def calculate_validation_misfits(self, event: str, iteration: str):
        """

        This function computes the L2 weighted waveform misfit over
        a whole trace. It is meant to provide misfits for validation
        purposes, e.g. to steer regularization parameters.

        :param event: name of the event
        :type event: str
        :param iteration: iteration for which to get the misfit
        :type iteration: str
        """
        from scipy.integrate import simps
        from obspy import geodetics
        from lasif.utils import progress
        min_sn_ratio = 0.05
        event = self.comm.events.get(event)

        # Fill cache if necessary.
        if not TAUPY_MODEL_CACHE:
            from obspy.taup import TauPyModel  # NOQA

            TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
        model = TAUPY_MODEL_CACHE["model"]

        # Get the ASDF filenames.
        processed_filename = self.comm.waveforms.get_asdf_filename(
            event_name=event["event_name"],
            data_type="processed",
            tag_or_iteration=self.comm.waveforms.preprocessing_tag,
        )
        synthetic_filename = self.comm.waveforms.get_asdf_filename(
            event_name=event["event_name"],
            data_type="synthetic",
            tag_or_iteration=iteration,
        )

        dt = self.comm.project.simulation_settings["time_step_in_s"]

        ds_syn = pyasdf.ASDFDataSet(synthetic_filename, mode="r", mpi=False)
        ds_obs = pyasdf.ASDFDataSet(processed_filename, mode="r", mpi=False)

        event_latitude = event["latitude"]
        event_longitude = event["longitude"]
        event_depth_in_km = event["depth_in_km"]

        minimum_period = self.comm.project.simulation_settings[
            "minimum_period_in_s"]

        misfit = 0.0
        for i, station in enumerate(ds_obs.waveforms.list()):

            if i % 30 == 0:
                progress(i + 1,
                         len(ds_obs.waveforms.list()),
                         status="Computing misfits")
            observed_station = ds_obs.waveforms[station]
            synthetic_station = ds_syn.waveforms[station]

            obs_tag = observed_station.get_waveform_tags()
            syn_tag = synthetic_station.get_waveform_tags()

            try:
                # Make sure both have length 1.
                assert len(obs_tag) == 1, (
                    "Station: %s - Requires 1 observed waveform tag."
                    " Has %i." %
                    (observed_station._station_name, len(obs_tag)))
            except AssertionError:
                continue

            assert len(syn_tag) == 1, (
                "Station: %s - Requires 1 synthetic waveform tag. "
                "Has %i." % (observed_station._station_name, len(syn_tag)))

            obs_tag = obs_tag[0]
            syn_tag = syn_tag[0]

            station_latitude = observed_station.coordinates["latitude"]
            station_longitude = observed_station.coordinates["longitude"]

            st_obs = observed_station[obs_tag]
            st_syn = synthetic_station[syn_tag]

            # Sample points down to 10 points per minimum_period
            # len_s = st_obs[0].stats.endtime - st_obs[0].stats.starttime
            # num_samples_wavelength = 10.0
            # new_sampling_rate = num_samples_wavelength * \
            #                     minimum_period / len_s
            # st_obs = st_obs.resample(new_sampling_rate)
            # st_syn = st_syn.resample(new_sampling_rate)
            # dt = 1.0/new_sampling_rate

            dist_in_deg = geodetics.locations2degrees(station_latitude,
                                                      station_longitude,
                                                      event_latitude,
                                                      event_longitude)

            # Get only a couple of P phases which should be the
            # first arrival
            # for every epicentral distance. Its quite a bit faster
            # than calculating
            # the arrival times for every phase.
            # Assumes the first sample is the centroid time of the event.
            ttp = model.get_travel_times(
                source_depth_in_km=event_depth_in_km,
                distance_in_degree=dist_in_deg,
                phase_list=["ttp"],
            )
            # Sort just as a safety measure.
            ttp = sorted(ttp, key=lambda x: x.time)
            first_tt_arrival = ttp[0].time

            # Estimate noise level from waveforms prior to the
            # first arrival.
            idx_end = int(
                np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
            idx_end = max(10, idx_end)
            idx_start = int(
                np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
            idx_start = max(10, idx_start)

            if idx_start >= idx_end:
                idx_start = max(0, idx_end - 10)

            for component in ["E", "N", "Z"]:
                try:
                    data_tr = select_component_from_stream(st_obs, component)
                    synth_tr = select_component_from_stream(st_syn, component)
                except LASIFNotFoundError:
                    continue
                # Scale data to synthetics
                scaling_factor = (synth_tr.data.ptp() / data_tr.data.ptp())
                if np.isinf(scaling_factor):
                    continue

                # Store and apply the scaling.
                data_tr.stats.scaling_factor = scaling_factor
                data_tr.data *= scaling_factor

                data = data_tr.data
                abs_data = np.abs(data)
                noise_absolute = abs_data[idx_start:idx_end].max()
                noise_relative = noise_absolute / abs_data.max()

                if noise_relative > min_sn_ratio:
                    continue

                # normalize the trace to [-1,1], reduce source effects
                # and balance amplitudes
                norm_scaling_fac = 1.0 / np.max(np.abs(synth_tr.data))
                data_tr.data *= norm_scaling_fac
                synth_tr.data *= norm_scaling_fac
                # envelope = obspy.signal.filter.envelope(data_tr.data)

                # scale up to around 1, also never divide by 0
                # by adding regularization term, dependent on noise level
                # env_weighting = 1.0 / (
                #            envelope + np.max(envelope) * 0.2)
                # data_tr.data *= env_weighting
                # synth_tr.data *= env_weighting

                diff = data_tr.data - synth_tr.data
                misfit += 0.5 * simps(y=diff**2, dx=data_tr.stats.delta)

        print("\nTotal event misfit: ", misfit)
        return misfit
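A minimal self-contained sketch of the per-component misfit measure used above (0.5 times the integral of the squared data-synthetic difference); the two traces here are synthetic stand-ins, not LASIF data:

import numpy as np
from scipy.integrate import simps

dt = 0.1
t = np.arange(0.0, 60.0, dt)
data = np.sin(2 * np.pi * t / 20.0)
synth = np.sin(2 * np.pi * (t - 0.5) / 20.0)
diff = data - synth
misfit = 0.5 * simps(y=diff**2, dx=dt)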
Example #10
import instaseis
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from europa_seismo import acceleration, utils
from obspy.taup import TauPyModel

mars = TauPyModel('EH45Tcold')
db = instaseis.open_db(
    '/home/romaguir/Documents/axisem_databases/Mars_EH45Tcold_5s_noatten_database'
)
print(db)

vs_surface = 3.0
mars_radius = 3389.5
mars_circ = 2 * np.pi * mars_radius
km_per_deg = mars_circ / 360.0


def get_max_accel(Mw, gcarc, Q=57822, units='dB', plot=False):
    '''
    returns maximum acceleration for a Haskell source

    params:
    Mw: moment magnitude of earthquake
    gcarc: distance in degrees
    Q: quality factor (defaults to 57822)
    units: only dB so far... 0 dB is defined as 1e-4 (i.e., 10 mgal)
    '''

    #find scalar moment
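The snippet above is cut off at the scalar-moment step; a hedged sketch of that standard conversion (the usual Mw-to-M0 relation, not code taken from the original file):

Mw = 5.0                        # illustrative magnitude
M0 = 10.0**(1.5 * Mw + 9.1)     # scalar moment in N*m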
Example #11
def plot_raw(rawdir, tcollection, event):
    """Make PNG plots of a collection of raw waveforms.

    Args:
        rawdir (str):
            Directory where PNG files should be saved.
        tcollection (StreamCollection):
            Sequence of streams.
        event (ScalarEvent):
            Event object.

    """
    model = TauPyModel(model="iasp91")
    source_depth = event.depth_km
    if source_depth < 0:
        source_depth = 0
    eqlat = event.latitude
    eqlon = event.longitude
    for stream in tcollection:
        stlat = stream[0].stats.coordinates['latitude']
        stlon = stream[0].stats.coordinates['longitude']
        dist = float(locations2degrees(eqlat, eqlon, stlat, stlon))
        try:
            arrivals = model.get_travel_times(source_depth_in_km=source_depth,
                                              distance_in_degree=dist,
                                              phase_list=['P', 'p', 'Pn'])
            arrival = arrivals[0]
            arrival_time = arrival.time
        except BaseException as e:
            fmt = ('Exception "%s" generated by get_travel_times() dist=%.3f '
                   'depth=%.1f')
            logging.warning(fmt % (str(e), dist, source_depth))
            arrival_time = 0.0
        ptime = arrival_time + (event.time - stream[0].stats.starttime)
        outfile = os.path.join(rawdir, '%s.png' % stream.get_id())

        fig, axeslist = plt.subplots(nrows=3, ncols=1, figsize=(12, 6))
        for ax, trace in zip(axeslist, stream):
            times = np.linspace(0.0,
                                trace.stats.endtime - trace.stats.starttime,
                                trace.stats.npts)
            ax.plot(times, trace.data, color='k')
            ax.set_xlabel('seconds since start of trace')
            ax.set_title('')
            ax.axvline(ptime, color='r')
            ax.set_xlim(left=0, right=times[-1])
            legstr = '%s.%s.%s.%s' % (trace.stats.network, trace.stats.station,
                                      trace.stats.location,
                                      trace.stats.channel)
            ax.legend(labels=[legstr], frameon=True, loc='upper left')
            tbefore = event.time + arrival_time < trace.stats.starttime + 1.0
            tafter = event.time + arrival_time > trace.stats.endtime - 1.0
            if tbefore or tafter:
                legstr = 'P arrival time %.1f seconds' % ptime
                left, right = ax.get_xlim()
                xloc = left + (right - left) / 20
                bottom, top = ax.get_ylim()
                yloc = bottom + (top - bottom) / 10
                ax.text(xloc, yloc, legstr, color='r')
        plt.savefig(outfile, bbox_inches='tight')
        plt.close()
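Example #12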
            transform=transform,
            zorder=10,
            fontsize=8,
            color='red')

    # print out the filters that have been used
    plt.text(0, DURATION * 1.05, filtertext1)
    plt.text(0, DURATION * 1.07, filtertext2)

    # Print the coloured phases over the seismic section
    textlist = []  # list of text on plot, to avoid over-writing
    for j, color in enumerate(COLORS):
        phase = PHASES[j]
        x = []
        y = []
        model = TauPyModel(model=MODEL)
        for dist in range(
                MIN_DIST, MAX_DIST + 1,
                1):  # calculate and plot one point for each degree from 0-180
            arrivals = model.get_travel_times(source_depth_in_km=EVT_Z,
                                              distance_in_degree=dist,
                                              phase_list=[phase])
            printed = False
            for i in range(len(arrivals)):
                instring = str(arrivals[i])
                phaseline = instring.split(" ")
                if phaseline[0] == phase and printed == False and int(
                        dist) > 0 and int(dist) < 180 and float(
                            phaseline[4]) > 0 and float(
                                phaseline[4]) < DURATION:
                    x.append(int(dist))
Example #13
def pro6stacked_seis(eq_file1, eq_file2, plot_scale_fac = 0.03, slow_delta = 0.0005,
			  slowR_lo = -0.1, slowR_hi = 0.1, slowT_lo = -0.1, slowT_hi = 0.1,
			  start_buff = -50, end_buff = 50, norm = 0, freq_corr = 1.0,
			  plot_dyn_range = 1000, fig_index = 401, get_stf = 0, ref_phase = 'blank',
			  ARRAY = 0, max_rat = 1.8, min_amp = 0.2, turn_off_black = 0,
			  R_slow_plot = 0, T_slow_plot = 0, tdiff_clip = 1, event_no = 0):

	import obspy
	import obspy.signal
	from obspy import UTCDateTime
	from obspy import Stream, Trace
	from obspy import read
	from obspy.geodetics import gps2dist_azimuth
	import numpy as np
	import os
	from obspy.taup import TauPyModel
	import obspy.signal as sign
	import matplotlib.pyplot as plt
	model = TauPyModel(model='iasp91')
	from scipy.signal import hilbert
	import math
	import time
	import statistics

#%% Get info
	#%% get locations
	print('Running pro6_plot_stacked_seis')
	start_time_wc = time.time()

	dphase = 'PKiKP'

	sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/events_good.txt'
	with open(sta_file, 'r') as file:
		lines = file.readlines()
	event_count = len(lines)

	print(str(event_count) + ' lines read from ' + sta_file)
	# Load station coords into arrays
	station_index = range(event_count)
	event_names        = []

	event_index = np.zeros(event_count)
	event_year  = np.zeros(event_count)
	event_mo    = np.zeros(event_count)
	event_day   = np.zeros(event_count)
	event_hr    = np.zeros(event_count)
	event_min   = np.zeros(event_count)
	event_sec   = np.zeros(event_count)
	event_lat   = np.zeros(event_count)
	event_lon   = np.zeros(event_count)
	event_dep   = np.zeros(event_count)
	event_mb    = np.zeros(event_count)
	event_ms    = np.zeros(event_count)
	event_tstart       = np.zeros(event_count)
	event_tend         = np.zeros(event_count)
	event_gcdist       = np.zeros(event_count)
	event_dist         = np.zeros(event_count)
	event_baz          = np.zeros(event_count)
	event_SNR          = np.zeros(event_count)
	event_Sflag        = np.zeros(event_count)
	event_PKiKPflag    = np.zeros(event_count)
	event_ICSflag      = np.zeros(event_count)
	event_PKiKP_radslo = np.zeros(event_count)
	event_PKiKP_traslo = np.zeros(event_count)
	event_PKiKP_qual   = np.zeros(event_count)
	event_ICS_qual     = np.zeros(event_count)

	iii = 0
	for ii in station_index:   # read file
		line = lines[ii]
		split_line = line.split()

		event_index[ii]  = float(split_line[0])
		event_names.append(split_line[1])
		event_year[ii]   = float(split_line[2])
		event_mo[ii]     = float(split_line[3])
		event_day[ii]    = float(split_line[4])
		event_hr[ii]     = float(split_line[5])
		event_min[ii]    = float(split_line[6])
		event_sec[ii]    = float(split_line[7])
		event_lat[ii]    = float(split_line[8])
		event_lon[ii]    = float(split_line[9])
		event_dep[ii]    = float(split_line[10])
		event_mb[ii]     = float(split_line[11])
		event_ms[ii]     = float(split_line[12])
		event_tstart[ii] = float(split_line[13])
		event_tend[ii]   = float(split_line[14])
		event_gcdist[ii] = float(split_line[15])
		event_dist[ii]   = float(split_line[16])
		event_baz[ii]    = float(split_line[17])
		event_SNR[ii]    = float(split_line[18])
		event_Sflag[ii]  = float(split_line[19])
		event_PKiKPflag[ii]     = float(split_line[20])
		event_ICSflag[ii]       = float(split_line[21])
		event_PKiKP_radslo[ii]  = float(split_line[22])
		event_PKiKP_traslo[ii]  = float(split_line[23])
		event_PKiKP_qual[ii]    = float(split_line[24])
		event_ICS_qual[ii]      = float(split_line[25])
#		print('Event ' + str(ii) + ' is ' + str(event_index[ii]))
		if event_index[ii] == event_no:
			iii = ii

	if iii == 0:
		print('Event ' + str(event_no) + ' not found')
	else:
		print('Event ' + str(event_no) + ' is ' + str(iii))
	#  find predicted slowness
	arrivals1 = model.get_travel_times(source_depth_in_km=event_dep[iii],distance_in_degree=event_gcdist[iii]-0.5,phase_list=[dphase])
	arrivals2 = model.get_travel_times(source_depth_in_km=event_dep[iii],distance_in_degree=event_gcdist[iii]+0.5,phase_list=[dphase])
	dtime = arrivals2[0].time - arrivals1[0].time
	event_pred_slo  = dtime/111.  # s/km

	# convert to pred rslo and tslo
	sin_baz = np.sin(event_baz[iii] * np.pi /180)
	cos_baz = np.cos(event_baz[iii] * np.pi /180)
	pred_Nslo = event_pred_slo * cos_baz
	pred_Eslo = event_pred_slo * sin_baz

	#  rotate observed slowness to N and E
	obs_Nslo = (event_PKiKP_radslo[iii] * cos_baz) - (event_PKiKP_traslo[iii] * sin_baz)
	obs_Eslo = (event_PKiKP_radslo[iii] * sin_baz) + (event_PKiKP_traslo[iii] * cos_baz)

	print('PR '+ str(pred_Nslo) + ' PT ' + str(pred_Eslo) + ' OR ' + str(obs_Nslo) + ' OT ' + str(obs_Eslo))
	#  find observed back-azimuth
#	bazi_rad = np.arctan(event_PKiKP_traslo[ii]/event_PKiKP_radslo[ii])
#	event_obs_bazi  = event_baz[ii] + (bazi_rad * 180 / np.pi)

	if ARRAY == 1:
		goto = '/Users/vidale/Documents/PyCode/LASA/EvLocs'
		os.chdir(goto)

	file = open(eq_file1, 'r')
	lines=file.readlines()
	split_line = lines[0].split()
	t1           = UTCDateTime(split_line[1])
	date_label1  = split_line[1][0:10]

	file = open(eq_file2, 'r')
	lines=file.readlines()
	split_line = lines[0].split()
	t2           = UTCDateTime(split_line[1])
	date_label2  = split_line[1][0:10]

	#%% read files
	# #%% Get saved event info, also used to name files
	# date_label = '2018-04-02' # date for filename
	if ARRAY == 1:
		goto = '/Users/vidale/Documents/PyCode/LASA/Pro_files'
		os.chdir(goto)
	fname1 = 'HD' + date_label1 + '_2dstack.mseed'
	fname2 = 'HD' + date_label2 + '_2dstack.mseed'
	st1 = Stream()
	st2 = Stream()
	st1 = read(fname1)
	st2 = read(fname2)

	tshift    = st1.copy()  # make array for time shift
	amp_ratio = st1.copy()  # make array for relative amplitude
	amp_ave   = st1.copy()  # make array for relative amplitude

	print('Read in: event 1 ' + str(len(st1)) + ' event 2 ' + str(len(st2)) + ' traces')
	nt1 = len(st1[0].data)
	nt2 = len(st2[0].data)
	dt1 = st1[0].stats.delta
	dt2 = st2[0].stats.delta
	print('Event 1 - First trace has ' + str(nt1) + ' time pts, time sampling of '
		  + str(dt1) + ' and thus duration of ' + str((nt1-1)*dt1))
	print('Event 2 - First trace has ' + str(nt2) + ' time pts, time sampling of '
		  + str(dt2) + ' and thus duration of ' + str((nt2-1)*dt2))
	if nt1 != nt2 or dt1 != dt2:
		print('nt or dt does not match')
		exit(-1)

	#%% Make grid of slownesses
	slowR_n = int(1 + (slowR_hi - slowR_lo)/slow_delta)  # number of slownesses
	slowT_n = int(1 + (slowT_hi - slowT_lo)/slow_delta)  # number of slownesses
	print(str(slowT_n) + ' trans slownesses, hi and lo are ' + str(slowT_hi) + '  ' + str(slowT_lo))
	# In English, stack_slows = range(slow_n) * slow_delta - slow_lo
	a1R = range(slowR_n)
	a1T = range(slowT_n)
	stack_Rslows = [(x * slow_delta + slowR_lo) for x in a1R]
	stack_Tslows = [(x * slow_delta + slowT_lo) for x in a1T]
	print(str(slowR_n) + ' radial slownesses, ' + str(slowT_n) + ' trans slownesses, ')

#%%  Loop over slowness
	total_slows = slowR_n * slowT_n
	global_max = 0
	for slow_i in range(total_slows): # find envelope, phase, tshift, and global max
		if slow_i % 200 == 0:
			print('At line 101, ' +str(slow_i) + ' slowness out of ' + str(total_slows))
		if len(st1[slow_i].data) == 0: # test for zero-length traces
				print('%d data has zero length ' % (slow_i))

		seismogram1 = hilbert(st1[slow_i].data)  # make analytic seismograms
		seismogram2 = hilbert(st2[slow_i].data)

		env1 = np.abs(seismogram1) # amplitude
		env2 = np.abs(seismogram2)
		amp_ave[slow_i].data    = 0.5 * (env1 + env2)
		amp_ratio[slow_i].data  = env1/env2

		angle1 = np.angle(seismogram1) # time shift
		angle2 = np.angle(seismogram2)
		phase1 = np.unwrap(angle1)
		phase2 = np.unwrap(angle2)
		dphase = (angle1 - angle2)
#		dphase = phase1 - phase2
		for it in range(nt1):
			if dphase[it] > math.pi:
				dphase[it] -= 2 * math.pi
			elif dphase[it] < -1 * math.pi:
				dphase[it] += 2 * math.pi
			if dphase[it] > math.pi or dphase[it] < -math.pi:
				print(f'Bad dphase value {dphase[it]:.2f}  {it:4d}')
		freq1 = np.diff(phase1) #freq in radians/sec
		freq2 = np.diff(phase2)
		ave_freq = 0.5*(freq1 + freq2)
		ave_freq_plus = np.append(ave_freq,[1]) # ave_freq one element too short
#		tshift[slow_i].data     = dphase / ave_freq_plus # 2*pi top and bottom cancels
		tshift[slow_i].data     = dphase/(2*math.pi*freq_corr)

		local_max = max(abs(amp_ave[slow_i].data))
		if local_max > global_max:
			global_max = local_max
#%% Extract slices
	tshift_full = tshift.copy()  # make array for time shift
	for slow_i in range(total_slows): # ignore less robust points
		if slow_i % 200 == 0:
			print('At line 140, ' +str(slow_i) + ' slowness out of ' + str(total_slows))
		for it in range(nt1):
			if ((amp_ratio[slow_i].data[it] < (1/max_rat)) or (amp_ratio[slow_i].data[it] > max_rat) or (amp_ave[slow_i].data[it] < (min_amp * global_max))):
				tshift[slow_i].data[it] = np.nan
	#%% If desired, find transverse slowness nearest T_slow_plot
	lowest_Tslow = 1000000
	for slow_i in range(slowT_n):
		if abs(stack_Tslows[slow_i] - T_slow_plot) < lowest_Tslow:
			lowest_Tindex = slow_i
			lowest_Tslow = abs(stack_Tslows[slow_i] - T_slow_plot)

	print(str(slowT_n) + ' T slownesses, index ' + str(lowest_Tindex) + ' is closest to input parameter ' + str(T_slow_plot) + ', slowness diff there is ' + str(lowest_Tslow) + ' and slowness is ' + str(stack_Tslows[lowest_Tindex]))
	# Select only stacks with that slowness for radial plot
	centralR_st1 = Stream()
	centralR_st2 = Stream()
	centralR_amp   = Stream()
	centralR_ampr  = Stream()
	centralR_tdiff = Stream()
	for slowR_i in range(slowR_n):
		ii = slowR_i*slowT_n + lowest_Tindex
		centralR_st1 += st1[ii]
		centralR_st2 += st2[ii]
		centralR_amp   += amp_ave[ii]
		centralR_ampr  += amp_ratio[ii]
		centralR_tdiff += tshift[ii]

	#%% If desired, find radial slowness nearest R_slow_plot
	lowest_Rslow = 1000000
	for slow_i in range(slowR_n):
		if abs(stack_Rslows[slow_i] - R_slow_plot) < lowest_Rslow:
			lowest_Rindex = slow_i
			lowest_Rslow = abs(stack_Rslows[slow_i] - R_slow_plot)

	print(str(slowR_n) + ' R slownesses, index ' + str(lowest_Rindex) + ' is closest to input parameter ' + str(R_slow_plot) + ', slowness diff there is ' + str(lowest_Rslow) + ' and slowness is ' + str(stack_Rslows[lowest_Rindex]))

	# Select only stacks with that slowness for transverse plot
	centralT_st1 = Stream()
	centralT_st2 = Stream()
	centralT_amp   = Stream()
	centralT_ampr  = Stream()
	centralT_tdiff = Stream()

	#%% to extract stacked time functions
	event1_sample = Stream()
	event2_sample = Stream()

	for slowT_i in range(slowT_n):
		ii = lowest_Rindex*slowT_n + slowT_i
		centralT_st1 += st1[ii]
		centralT_st2 += st2[ii]
		centralT_amp   += amp_ave[ii]
		centralT_ampr  += amp_ratio[ii]
		centralT_tdiff += tshift[ii]

	#%% compute timing time series
	ttt = (np.arange(len(st1[0].data)) * st1[0].stats.delta + start_buff) # in units of seconds

#%% Plot radial amp and tdiff vs time plots
	fig_index = 6
#	plt.close(fig_index)
	plt.figure(fig_index,figsize=(30,10))
	plt.xlim(start_buff,end_buff)
	plt.ylim(stack_Rslows[0], stack_Rslows[-1])
	for slowR_i in range(slowR_n):  # loop over radial slownesses
		dist_offset = stack_Rslows[slowR_i] # trying for approx degrees
		ttt = (np.arange(len(centralR_st1[slowR_i].data)) * centralR_st1[slowR_i].stats.delta
		 + (centralR_st1[slowR_i].stats.starttime - t1))
		plt.plot(ttt, (centralR_st1[slowR_i].data - np.median(centralR_st1[slowR_i].data))*plot_scale_fac /global_max + dist_offset, color = 'green')
		plt.plot(ttt, (centralR_st2[slowR_i].data - np.median(centralR_st2[slowR_i].data))*plot_scale_fac /global_max + dist_offset, color = 'red')
		# extract stacked time functions
		if get_stf != 0:
			if np.abs(stack_Rslows[slowR_i]- 0.005) < 0.000001: # kludge, not exactly zero when desired
				event1_sample = centralR_st1[slowR_i].copy()
				event2_sample = centralR_st2[slowR_i].copy()
#		plt.plot(ttt, (centralR_amp[slowR_i].data)  *plot_scale_fac/global_max + dist_offset, color = 'purple')
		if turn_off_black == 0:
			plt.plot(ttt, (centralR_tdiff[slowR_i].data)*plot_scale_fac/1 + dist_offset, color = 'black')
			plt.plot(ttt, (centralR_amp[slowR_i].data)*0.0 + dist_offset, color = 'lightgray') # reference lines
	plt.xlabel('Time (s)')
	plt.ylabel('R Slowness (s/km)')
	plt.title(ref_phase + ' seismograms and tdiff at ' + str(T_slow_plot) + ' T slowness, green is event1, red is event2')
	# Plot transverse amp and tdiff vs time plots
	fig_index = 7
#	plt.close(fig_index)
	plt.figure(fig_index,figsize=(30,10))
	plt.xlim(start_buff,end_buff)
	plt.ylim(stack_Tslows[0], stack_Tslows[-1])

	for slowT_i in range(slowT_n):  # loop over transverse slownesses
		dist_offset = stack_Tslows[slowT_i] # trying for approx degrees
		ttt = (np.arange(len(centralT_st1[slowT_i].data)) * centralT_st1[slowT_i].stats.delta
		 + (centralT_st1[slowT_i].stats.starttime - t1))
		plt.plot(ttt, (centralT_st1[slowT_i].data - np.median(centralT_st1[slowT_i].data))*plot_scale_fac /global_max + dist_offset, color = 'green')
		plt.plot(ttt, (centralT_st2[slowT_i].data - np.median(centralT_st2[slowT_i].data))*plot_scale_fac /global_max + dist_offset, color = 'red')
#		plt.plot(ttt, (centralT_amp[slowT_i].data)  *plot_scale_fac/global_max + dist_offset, color = 'purple')
		if turn_off_black == 0:
			plt.plot(ttt, (centralT_tdiff[slowT_i].data)*plot_scale_fac/1 + dist_offset, color = 'black')
			plt.plot(ttt, (centralT_amp[slowT_i].data)*0.0 + dist_offset, color = 'lightgray') # reference lines
	plt.xlabel('Time (s)')
	plt.ylabel('T Slowness (s/km)')
	plt.title(str(event_no) + '  ' + date_label1 + '  ' +ref_phase + ' seismograms and tdiff ' + str(R_slow_plot) + ' R slowness, green is event1, red is event2')
	os.chdir('/Users/vidale/Documents/PyCode/LASA/Quake_results/Plots')
#	plt.savefig(date_label1 + '_' + str(start_buff) + '_' + str(end_buff) + '_stack.png')

#%% R-T tshift averaged over time window
	fig_index = 8
	stack_slice = np.zeros((slowR_n,slowT_n))
	for slowR_i in range(slowR_n):  # loop over radial slownesses
		for slowT_i in range(slowT_n):  # loop over transverse slownesses
			index = slowR_i*slowT_n + slowT_i
			num_val = np.nanmedian(tshift[index].data)
#			num_val = statistics.median(tshift_full[index].data)
			stack_slice[slowR_i, slowT_i] = num_val # adjust for dominant frequency of 1.2 Hz, not 1 Hz
#	stack_slice[0,0] = -0.25
#	stack_slice[0,1] =  0.25
#	tdiff_clip = 0.4/1.2
	tdiff_clip_max =  tdiff_clip  # DO NOT LEAVE COMMENTED OUT!!
	tdiff_clip_min = -tdiff_clip

	y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
				 slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

	fig, ax = plt.subplots(1, figsize=(7,6))
#		fig, ax = plt.subplots(1, figsize=(9,2))
#		fig.subplots_adjust(bottom=0.3)
#	c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.bwr,      vmin = tdiff_clip_min, vmax = tdiff_clip_max)
	c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.coolwarm, vmin = tdiff_clip_min, vmax = tdiff_clip_max)
	ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
	circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
	ax.add_artist(circle1)
	circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
	ax.add_artist(circle2)  #outer core limit
	fig.colorbar(c, ax=ax)
	plt.ylabel('R Slowness (s/km)')
	plt.title(ref_phase + ' time shift')
#	plt.title('T-R average time shift ' + date_label1 + ' ' + date_label2)
	plt.show()

#%% R-T amplitude averaged over time window
	fig_index = 9
	stack_slice = np.zeros((slowR_n,slowT_n))
	smax = 0
	for slowR_i in range(slowR_n):  # loop over radial slownesses
		for slowT_i in range(slowT_n):  # loop over transverse slownesses
			index = slowR_i*slowT_n + slowT_i
			num_val = np.nanmedian(amp_ave[index].data)
			stack_slice[slowR_i, slowT_i] = num_val
			if num_val > smax:
				smax = num_val
#	stack_slice[0,0] = 0

	y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
				 slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

#	fig, ax = plt.subplots(1)
	fig, ax = plt.subplots(1, figsize=(7,6))
#	c = ax.pcolormesh(x1, y1, stack_slice/smax, cmap=plt.cm.gist_yarg, vmin = 0.5)
	c = ax.pcolormesh(x1, y1, stack_slice/smax, cmap=plt.cm.gist_rainbow_r, vmin = 0)
#	c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.gist_rainbow_r, vmin = 0)
	ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
	circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
	ax.add_artist(circle1)  #inner core limit
	circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
	ax.add_artist(circle2)  #outer core limit

	c = ax.scatter(pred_Eslo, pred_Nslo, color='blue', s=100, alpha=0.75)
	c = ax.scatter(obs_Eslo, obs_Nslo, color='black', s=100, alpha=0.75)

	fig.colorbar(c, ax=ax)
	plt.xlabel('Transverse Slowness (s/km)')
	plt.ylabel('Radial Slowness (s/km)')
	plt.title(str(event_no) + '  ' + date_label1 + '  ' + ref_phase + ' beam amplitude')
#	plt.title('Beam amplitude ' + date_label1 + ' ' + date_label2)
	os.chdir('/Users/vidale/Documents/PyCode/LASA/Quake_results/Plots')
	plt.savefig(date_label1 + '_' + str(start_buff) + '_' + str(end_buff) + '_beam.png')
	plt.show()

#%%  Save processed files
	if ARRAY == 0:
		goto = '/Users/vidale/Documents/PyCode/Hinet'
	if ARRAY == 1:
		goto = '/Users/vidale/Documents/PyCode/LASA/Pro_Files'
	os.chdir(goto)

	fname = 'HD' + date_label1 + '_' + date_label2 + '_tshift.mseed'
	tshift_full.write(fname,format = 'MSEED')

	fname = 'HD' + date_label1 + '_' + date_label2 + '_amp_ave.mseed'
	amp_ave.write(fname,format = 'MSEED')

	fname = 'HD' + date_label1 + '_' + date_label2 + '_amp_ratio.mseed'
	amp_ratio.write(fname,format = 'MSEED')

#%% Option to write out stf
	if get_stf != 0:
		event1_sample.taper(0.1)
		event2_sample.taper(0.1)
		fname = 'HD' + date_label1 + '_stf.mseed'
		event1_sample.write(fname,format = 'MSEED')
		fname = 'HD' + date_label2 + '_stf.mseed'
		event2_sample.write(fname,format = 'MSEED')

	elapsed_time_wc = time.time() - start_time_wc
	print('This job took ' + str(elapsed_time_wc) + ' seconds')
	os.system('say "Done"')
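The stacking scripts in these examples all address the 2-D slowness grid through one flattened index, index = slowR_i*slowT_n + slowT_i, and later reshape flat lists of beams into (R, T) matrices for pcolormesh. A minimal, self-contained sketch of that convention follows; the grid bounds here are illustrative placeholders, not values taken from the scripts.

import numpy as np

# Illustrative grid bounds in the style of slowR_lo/hi, slowT_lo/hi (s/km)
slowR_lo, slowR_hi = -0.04, 0.04
slowT_lo, slowT_hi = -0.04, 0.04
slow_delta = 0.01

slowR_n = int(round(1 + (slowR_hi - slowR_lo) / slow_delta))
slowT_n = int(round(1 + (slowT_hi - slowT_lo) / slow_delta))
stack_Rslows = [i * slow_delta + slowR_lo for i in range(slowR_n)]
stack_Tslows = [i * slow_delta + slowT_lo for i in range(slowT_n)]

def grid_index(slowR_i, slowT_i):
    # radial slowness varies slowest, transverse fastest
    return slowR_i * slowT_n + slowT_i

def slowness_at(index):
    # invert the flattened index back to (R, T) slowness values
    return stack_Rslows[index // slowT_n], stack_Tslows[index % slowT_n]

# Reshaping a flat list of beams into an (R, T) matrix, as the pcolormesh
# sections do when filling stack_slice
flat = np.arange(slowR_n * slowT_n, dtype=float)
stack_slice = flat.reshape(slowR_n, slowT_n)
assert stack_slice[2, 3] == flat[grid_index(2, 3)]
assert slowness_at(grid_index(2, 3)) == (stack_Rslows[2], stack_Tslows[3])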
Example #14
0
def migrate_1d(rf_trace,**kwargs):
####################################################################################
   '''
   Takes an rf trace and returns a list of dictionaries with pierce points and
   associated receiver function amplitudes.

   *note: it's best to pass a TauPyModel instance (e.g., prem_5km) to avoid
    having to initialize a new model every time you call this function
   '''
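   # The depth loop below maps each conversion depth d to one sample of the
   # receiver function via the differential time t(Pds) - t(P): with the trace
   # windowed from window[0] seconds relative to P, that sample index is
   #   i_t = (0 - window[0])/dt + (t_Pds - t_P)/dt,
   # so amp[ii] is the RF amplitude at the predicted Pds arrival for depth d.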

   #get kwargs
   depth      = kwargs.get('depth_range',np.arange(50,805,5))
   taup_model = kwargs.get('taup_model','None')
   format     = kwargs.get('format','rfh5')
   window     = kwargs.get('window',[-10,100])

   #geographical information
   if format == 'rfh5':
      gcarc = rf_trace.stats.gcarc
      dt    = rf_trace.stats.delta
      evla  = rf_trace.stats.evla
      evlo  = rf_trace.stats.evlo
      evdp  = rf_trace.stats.evdp
      stla  = rf_trace.stats.stla
      stlo  = rf_trace.stats.stlo
      az    = rf_trace.stats.az
      o     = rf_trace.stats.o

   #initializations
   amp           = np.zeros((len(depth)))
   origin        = geopy.Point(evla,evlo)
   bearing       = az
   pierce_dict   = []
   if taup_model == 'None':
      taup_model = TauPyModel('prem_5km')

   ii = 0
   for d in depth:
      phase  = 'P'+str(d)+'s'
      pierce = taup_model.get_pierce_points(evdp,gcarc,phase_list=[phase])
      arrs   = taup_model.get_travel_times(evdp,gcarc,phase_list=['P',phase])

      #in case there's duplicate phase arrivals
      P_arrs = []
      Pds_arrs = []
      for arr in arrs:
        if arr.name == 'P':
           P_arrs.append(arr)
           #p_arr = arr.time
        elif arr.name == phase:
           #pds_arr = arr.time
           Pds_arrs.append(arr)
      p_arr = P_arrs[0].time 
      pds_arr = Pds_arrs[0].time

      #determine amplitude at each depth
      #window_start = o #TODO update writeh5py_dict so that win_start win_end are written
      pds_minus_p  = pds_arr - p_arr
      i_start      = int((0.0 - window[0])/dt)
      i_t          = int(pds_minus_p/dt) + i_start
      amp[ii]      = rf_trace.data[i_t]

      #find pierce points and create pierce dictionary
      points = pierce[0].pierce
      for p in points:
         if p['depth'] == d and np.degrees(p['dist']) > 20.0:
            prc_dist = np.degrees(p['dist'])
            d_km     = prc_dist * ((2*np.pi*6371.0/360.0))
            destination = VincentyDistance(kilometers=d_km).destination(origin,bearing)
            lat = destination[0]
            lon = destination[1]
            row = {'depth':d,'dist':prc_dist,'lat':lat,'lon':lon,'amplitude':amp[ii]}
            pierce_dict.append(row)
      ii += 1

   return pierce_dict
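A minimal usage sketch for migrate_1d, assuming the function above is in scope, that rf_tr is an ObsPy Trace carrying the 'rfh5'-style stats fields (gcarc, delta, evla, evlo, evdp, stla, stlo, az, o), and that a prem_5km TauPy model is available; the trace name and model are placeholders.

import numpy as np
from obspy.taup import TauPyModel

prem_5km = TauPyModel(model='prem_5km')   # custom model name from the docstring above

# rf_tr: an ObsPy Trace with rfh5-style stats, assumed prepared elsewhere
pierce_points = migrate_1d(rf_tr,
                           taup_model=prem_5km,
                           depth_range=np.arange(50, 805, 5),
                           window=[-10, 100])

# each row holds one pierce point and the RF amplitude mapped to that depth
for row in pierce_points[:3]:
    print(row['depth'], row['lat'], row['lon'], row['amplitude'])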
Example #15
0
def pro7_pair_scan(eq_num1, eq_num2, slow_delta = 0.0005, turn_off_black = 1,
              slowR_lo = -0.1, slowR_hi = 0.1, slowT_lo = -0.1, slowT_hi = 0.1,
              start_buff = 50, end_buff = 50,fig_index = 401, do_T = False, do_R = False,
              ZslowR_lo = -0.1, ZslowR_hi = 0.1, ZslowT_lo = -0.1, ZslowT_hi = 0.1,
              Zstart_buff = 50, Zend_buff = 50, zoom = False, tdiff_clip = 1,
              ref_phase = 'blank', cc_thres = 0.8, min_amp = 0.2,
              R_slow_plot = 0.06, T_slow_plot = 0.0, no_plots = False,
              snaptime = 8, snaps = 10, snap_depth = 0,
              nR_plots  = 3, nT_plots = 3, slow_incr = 0.01, NS = False,
              ARRAY = 0, auto_slice = True, two_slice_plots = False, beam_sums = True,
              wiggly_plots = 0, start_beam = 0, end_beam = 0, log_plot = False,
              log_plot_range = 2, no_tdiff_plot = False,
              wig_scale_fac = 1, tdiff_scale_fac = 1):

    from obspy import read
    from obspy.taup import TauPyModel
    import numpy as np
    import os
    import matplotlib.pyplot as plt
    import time
    import sys
    import math
    from obspy import UTCDateTime
    from obspy import Stream
    from termcolor import colored
    from obspy.geodetics import gps2dist_azimuth
    model = TauPyModel(model='iasp91')

    print(colored('Running pro7_pair_scan', 'cyan'))
    start_time_wc = time.time()

    if zoom == True:
        if Zstart_buff  < start_buff:
            print(colored(f'Zstart_buff of {Zstart_buff:.1f} cannot be < start_buff of {start_buff:.1f}', 'red'))
            Zstart_buff = start_buff
            exit()
        if Zend_buff    > end_buff:
            print(colored(f'Zend_buff of {Zend_buff:.1f} cannot be > end_buff of {end_buff:.1f}', 'red'))
            Zend_buff   = end_buff
            exit()

    #%% Input parameters and computed files
    fname1 = '/Users/vidale/Documents/Research/IC/EvLocs/event' + str(eq_num1) + '.txt'
    fname2 = '/Users/vidale/Documents/Research/IC/EvLocs/event' + str(eq_num2) + '.txt'
    file1 = open(fname1, 'r')
    file2 = open(fname2, 'r')
    lines1=file1.readlines()
    lines2=file2.readlines()
    split_line1  = lines1[0].split()
    split_line2  = lines2[0].split()
    t1           = UTCDateTime(split_line1[1])
    # t2 = UTCDateTime(split_line2[1])  # not needed
    # date_label = '2018-04-02' # dates in filename
    date_label1  = split_line1[1][0:10]
    date_label2  = split_line2[1][0:10]
    save_name = '/Users/vidale/Documents/Research/IC/Plots/' + date_label1 + '_' + date_label2 + '_'
    ev_lat       = float(split_line1[2])
    ev_lon       = float(split_line1[3])
    ev_depth     = float(split_line1[4])

    if ARRAY == 0:
        ref_lat = 36.3  # °N, around middle of Japan
        ref_lon = 138.5 # °E
    elif ARRAY == 1:
        ref_lat = 46.7  # °N keep only inner rings A-D
        ref_lon = -106.22   # °E
    elif ARRAY == 2: # China set and center
        ref_lat = 38      # °N
        ref_lon = 104.5   # °E
    ref_distance = gps2dist_azimuth(ref_lat,ref_lon,ev_lat,ev_lon)
    ref_dist     = ref_distance[0]/(1000*111)
    ref_az       = ref_distance[1]
    ref_back_az  = ref_distance[2]

    arrivals_ref   = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree=ref_dist, phase_list=[ref_phase])
    arrival_time = arrivals_ref[0].time

    # Estimate slowness of reference phase
    arrivals1 = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree=ref_dist-0.5,phase_list=[ref_phase])
    arrivals2 = model.get_travel_times(source_depth_in_km=ev_depth,distance_in_degree=ref_dist+0.5,phase_list=[ref_phase])
    dtime = arrivals2[0].time - arrivals1[0].time
    event_pred_slo  = dtime/111.  # s/km
    # convert to pred rslo and tslo
    if NS == True:    #  rotate predicted slowness to N and E
        print(f'Array  lat {ref_lat:.0f}, lon  {ref_lon:.0f}, Event lat {ev_lat:.0f}, lon {ev_lon:.0f}, az {ref_az:.0f}, baz {ref_back_az:.0f}')
        sin_baz = np.sin(ref_az * np.pi /180)
        cos_baz = np.cos(ref_az * np.pi /180)
        pred_Nslo = event_pred_slo * cos_baz
        pred_Eslo = event_pred_slo * sin_baz
    else:
        pred_Nslo = event_pred_slo
        pred_Eslo = 0

    name_str = '/Users/vidale/Documents/Research/IC/Pro_files/HD' + date_label1 + '_' + date_label2 + '_'
    fname1  = name_str + 'tshift.mseed'
    fname2  = name_str + 'amp_ave.mseed'
    fname3  = name_str + 'cc.mseed'
    tdiff   = Stream()
    amp_ave = Stream()
    cc      = Stream()
    tdiff   = read(fname1)
    amp_ave = read(fname2)
    cc      = read(fname3)

    dt     = tdiff[  0].stats.delta
    dt_cc  = cc[     0].stats.delta
    dt_amp = amp_ave[0].stats.delta
    nt     = len(tdiff[  0].data)
    nt_cc  = len(cc[     0].data)
    nt_amp = len(amp_ave[0].data)
    print(f'cc      data length is {nt_cc } time pts, dt is {dt_cc :.2f}, so record length is {dt_cc  * nt_cc:.0f} seconds')
    print(f'tdiff   data length is {nt    } time pts, dt is {dt    :.2f}, so record length is {dt     * nt   :.0f} seconds')
    print(f'amp_ave data length is {nt_amp} time pts, dt is {dt_amp:.2f}, so record length is {dt_amp * nt_amp:.0f} seconds')
    print(f'input grids: tdiff {len(tdiff)} elements, cc {len(cc)} elements, amp_ave {len(amp_ave)} elements')

    #%% Make grid of slownesses
    slowR_n = int(round(1 + (slowR_hi - slowR_lo)/slow_delta))  # number of slownesses
    slowT_n = int(round(1 + (slowT_hi - slowT_lo)/slow_delta))  # number of slownesses
    stack_nt = int(round(1 + (end_buff - start_buff)/dt))  # number of time points
    print(f'{slowR_n} radial slownesses, low is {slowR_lo}, high is {slowR_hi}')
    print(f'{slowT_n} transv slownesses, low is {slowT_lo}, high is {slowT_hi}')
    print(f'{stack_nt} time points, low is {start_buff} s, high is {end_buff} s, dt is {dt}')
    # In English, stack_slows = range(slow_n) * slow_delta - slow_lo
    a1R = range(slowR_n)
    a1T = range(slowT_n)
    stack_Rslows = [(x * slow_delta + slowR_lo) for x in a1R]
    stack_Tslows = [(x * slow_delta + slowT_lo) for x in a1T]

    #%% Select subset if Zoomed
    if zoom == True:
        Ztdiff   = Stream()
        Zamp_ave = Stream()
        Zcc      = Stream()
        print(f'before calculation, tdiff[0] has length {len(tdiff[0])}')
        for slowR_i in range(slowR_n):  # loop over radial slownesses
            for slowT_i in range(slowT_n):  # loop over transverse slownesses, kludge to evade rounding error
                if ((stack_Rslows[slowR_i] >= ZslowR_lo - 0.000001) and (stack_Rslows[slowR_i] <= ZslowR_hi + 0.000001) and
                    (stack_Tslows[slowT_i] >= ZslowT_lo - 0.000001) and (stack_Tslows[slowT_i] <= ZslowT_hi + 0.000001)):
                    index = slowR_i*slowT_n + slowT_i
                    s_t = t1 + Zstart_buff
                    e_t = t1 + Zend_buff
                    Ztdiff   += tdiff[  index].trim(starttime=s_t, endtime=e_t)
                    Zamp_ave += amp_ave[index].trim(starttime=s_t, endtime=e_t)
                    Zcc      += cc[     index].trim(starttime=s_t, endtime=e_t)
                            #tr.trim(starttime=s_t,endtime = e_t)
        tdiff   = Ztdiff  # tdiff might be one element shorter than expected
        amp_ave = Zamp_ave
        cc      = Zcc
        nt = len(tdiff[0].data)
        start_buff = Zstart_buff
        # make time series
        print(f'after calculation, Ztdiff[0] has length {len(Ztdiff[0])}')
        print(f'after calculation, tdiff[0] has length {len(tdiff[0])}')
        print(f'slowR_lo  is {slowR_lo}  and slowR_hi  is {slowR_hi}  and slowT_lo  is {slowT_lo}  and slowT_hi is {slowT_hi}')
        print(f'ZslowR_lo is {ZslowR_lo} and ZslowR_hi is {ZslowR_hi} and ZslowT_lo is {ZslowT_lo} and ZslowT_hi is {ZslowT_hi}')

        #%% -- Re-make subset with more limited grid of slownesses
        slowR_lo   = ZslowR_lo
        slowR_hi   = ZslowR_hi
        slowT_lo   = ZslowT_lo
        slowT_hi   = ZslowT_hi
        start_buff = Zstart_buff
        end_buff   = Zend_buff
        slowR_n = int(round(1 + (slowR_hi - slowR_lo)/slow_delta))  # number of slownesses
        slowT_n = int(round(1 + (slowT_hi - slowT_lo)/slow_delta))  # number of slownesses
        stack_nt = int(round(1 + ((end_buff - start_buff)/dt)))  # number of time points
        if stack_nt != len(Ztdiff[0]):
            print(f'Array length rounding clash, tdiff[0] has length {len(tdiff[0])}, stack_nt is {stack_nt}')
            stack_nt = len(tdiff[0])
        print('After zoom ' + str(slowT_n) + ' trans slownesses, hi and lo are ' + str(slowT_hi) + '  ' + str(slowT_lo) + ' stack_nt is ' + str(stack_nt))
        # In English, stack_slows = range(slow_n) * slow_delta - slow_lo
        a1R = range(slowR_n)
        a1T = range(slowT_n)
        stack_Rslows = [(x * slow_delta + slowR_lo) for x in a1R]
        stack_Tslows = [(x * slow_delta + slowT_lo) for x in a1T]
        print('After zoom ' + str(slowR_n) + ' radial slownesses, ' + str(slowT_n) + ' trans slownesses, ')
        print('Output trace starttime ' + str(Ztdiff[0].stats.starttime))

    ttt = (np.arange(len(tdiff[0].data)) * tdiff[0].stats.delta + start_buff) # in units of seconds

    global_max = 0  # find global_max, largest amplitude in amp_ave beam array envelopes
    for slow_i in range(len(amp_ave)): # find global max of ave_amp
        local_max = max(amp_ave[slow_i].data)
        if local_max > global_max:
            global_max = local_max

    #%% Mask out weak and/or less correlated points
    amp_ave_thres = amp_ave.copy()  # copy amp envelope array, set amps and tdiff below thresholds to NaN using global_max
    nt = len(tdiff[0].data)
    for slow_i in range(len(tdiff)): # don't plot less robust points, change them to NANs
        for it in range(nt):
            if (cc[slow_i].data[it] < cc_thres) or (amp_ave[slow_i].data[it] < (min_amp * global_max)):
                tdiff[        slow_i].data[it] = np.nan
                amp_ave_thres[slow_i].data[it] = np.nan

    for slow_i in range(len(tdiff)): # set NaNs to avoid including (errant?) large time shifts
        for it in range(nt-1):
            if (abs(tdiff[slow_i].data[it+1] - tdiff[slow_i].data[it]) > tdiff_clip):
                tdiff[slow_i].data[it] = np.nan

    if log_plot == True:  # convert amp envelope array to log amp and record global_max of logs
        global_max = -100  # different global max if converting to plotting log amp
                           # remember logs can be negative
        for slow_i in range(len(amp_ave)): # find global max of ave_amp
            for data_i in range(len(amp_ave[slow_i].data)): # find global max of ave_amp
                amp_ave[slow_i].data[data_i] = math.log10(amp_ave[slow_i].data[data_i])
            local_max = max(amp_ave[slow_i].data)
            if local_max > global_max:
                global_max = local_max

#%% Auto slice option
    if auto_slice == True:

#%% -- compute timing time series
        #%% -- R slices
        ttt = (np.arange(stack_nt) * dt + start_buff)
        if do_R == True:  # remember plots scanning R are those at constant T
            # Rmean_st = Stream() # array to collect average tdiff
            # Rmean_am = Stream() # array to collect average amp
            for T_cnt in range(-nR_plots, nR_plots + 1):
                if nR_plots * slow_incr > slowT_hi:
                    print('nR_plots * slow_incr > slowT_hi, out of range')
                    sys.exit()
                if -(nR_plots * slow_incr) < slowT_lo:
                    print('-nR_plots * slow_incr < slowT_lo, out of range')
                    sys.exit()
                #%% -- -- gather R data
                lowest_Tslow = 1000000  # find index of row with T_cnt slowness, awkward coding
                target_slow = (T_cnt * slow_incr)
                for slow_i in range(slowT_n):
                    if abs(stack_Tslows[slow_i] - target_slow) < lowest_Tslow:
                        lowest_Tindex = slow_i
                        lowest_Tslow = abs(stack_Tslows[slow_i] - target_slow)
                print(f'For R plot {T_cnt:2d}, {lowest_Tindex:3d} is T slow nearest {target_slow:.3f}, difference is {lowest_Tslow:.3f}')

                # Collect data with that slowness for R (T=const) plot
                Rcentral_st = Stream()
                Rcentral_am = Stream()
                for slowR_i in range(slowR_n):
                    Rcentral_st += tdiff[  slowR_i*slowT_n + lowest_Tindex]
                    Rcentral_am += amp_ave[slowR_i*slowT_n + lowest_Tindex]
                # if T_cnt == -nR_plots: # Form arrays just before first slice is summed   # doesn't do NaNs, also 20 lines up
                #     Rmean_st = np.zeros((slowR_n,stack_nt))
                #     Rmean_am = np.zeros((slowR_n,stack_nt))
                # for it in range(stack_nt):  # add slice one point at a time
                #     for slowR_i in range(slowR_n):  # loop over slownesses and time
                #         num_val = Rcentral_st[slowR_i].data[it]
                #         Rmean_st[slowR_i, it] += num_val
                #         num_val = Rcentral_am[slowR_i].data[it]
                #         Rmean_am[slowR_i, it] += num_val

                #%% -- -- plot R tdiff
                if no_tdiff_plot == False:
                    stack_arrayR_Tdf = np.zeros((slowR_n,stack_nt))
                    for it in range(stack_nt):  # check points one at a time
                        for slowR_i in range(slowR_n):  # loop over slownesses
                            num_val = Rcentral_st[slowR_i].data[it]
                            stack_arrayR_Tdf[slowR_i, it] = num_val

                    y, x = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                                 slice(ttt[0], ttt[-1] + dt, dt)]

                    fig, ax = plt.subplots(1, figsize=(10,3))
                    c = ax.pcolormesh(x, y, stack_arrayR_Tdf, cmap=plt.cm.coolwarm, vmin= -tdiff_clip, vmax=tdiff_clip)
                    fig.subplots_adjust(bottom=0.2)
                    ax.axis([x.min(), x.max(), y.min(), y.max()])
                    fig.colorbar(c, ax=ax, label='time shift (s)')
                    c = ax.scatter(arrival_time, event_pred_slo, color='black'  , s=50, alpha=0.75)
                    plt.xlabel('Time (s)')
                    plt.ylabel('Radial slowness (s/km)')
                    plt.title(f'Tdiff at {target_slow:.3f} s/km T slowness, {fname1[48:58]}  {fname1[59:69]}  min amp {min_amp:.1f}  cc_thres {cc_thres:.2f}')
                    if no_plots == False:
                        plt.show()
                    fig_index += 1

                #%% -- -- plot R amp
                stack_arrayR_Amp = np.zeros((slowR_n,stack_nt))
                for it in range(stack_nt):  # check points one at a time
                    for slowR_i in range(slowR_n):  # for this station, loop over slownesses
                        num_val = Rcentral_am[slowR_i].data[it]
                        stack_arrayR_Amp[slowR_i, it] = num_val

                y, x = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                             slice(ttt[0], ttt[-1] + dt, dt)]

                fig, ax = plt.subplots(1, figsize=(10,3))
                if log_plot == True:
                    c = ax.pcolormesh(x, y, stack_arrayR_Amp - global_max, cmap=plt.cm.gist_rainbow_r, vmin= - log_plot_range, vmax=0)
                else:
                    c = ax.pcolormesh(x, y, stack_arrayR_Amp, cmap=plt.cm.gist_rainbow_r, vmin= 0, vmax=global_max)
                fig.subplots_adjust(bottom=0.2)
                ax.axis([x.min(), x.max(), y.min(), y.max()])
                if log_plot == True:
                    fig.colorbar(c, ax=ax, label='log amplitude')
                else:
                    fig.colorbar(c, ax=ax, label='linear amplitude')
                c = ax.scatter(arrival_time, event_pred_slo, color='black'  , s=50, alpha=0.75)
                plt.xlabel('Time (s)')
                if NS == True:
                    plt.ylabel('North Slowness (s/km)')
                    plt.title(f'Amp at {target_slow:.3f} s/km E slowness, {fname1[48:58]}  {fname1[59:69]}')
                else:
                    plt.ylabel('Radial Slowness (s/km)')
                    plt.title(f'Amp at {target_slow:.3f} s/km T slowness, {fname1[48:58]}  {fname1[59:69]}')
                if no_plots == False:
                    plt.show()
                fig_index += 1

        #%% -- T slices
        if do_T == True:  # remember plots scanning T are those at constant R
            for R_cnt in range(-nT_plots, nT_plots + 1):
                if nT_plots * slow_incr > slowR_hi:
                    print('nT_plots * slow_incr > slowR_hi, out of range')
                    sys.exit()
                if -(nT_plots * slow_incr) < slowR_lo:
                    print('-nT_plots * slow_incr < slowR_lo, out of range')
                    sys.exit()
                #%% -- -- gather T data
                lowest_Rslow = 1000000  # find index of row closest to R_cnt slowness
                target_slow = (R_cnt * slow_incr) # radial slowness of this slice in s/°
                for slow_i in range(slowR_n):
                    if abs(stack_Rslows[slow_i] - target_slow) < lowest_Rslow:
                        lowest_Rindex = slow_i
                        lowest_Rslow = abs(stack_Rslows[slow_i] - target_slow)

                print(f'For T plot {R_cnt:2d}, {lowest_Rindex:3d} is R slow nearest {target_slow:.3f}, difference is {lowest_Rslow:.3f}')

                # Collect data with that slowness for T (R=const) plot
                Tcentral_st = Stream()
                Tcentral_am = Stream()
                for slowT_i in range(slowT_n):
                    Tcentral_st += tdiff[  lowest_Rindex*slowT_n + slowT_i]
                    Tcentral_am += amp_ave[lowest_Rindex*slowT_n + slowT_i]

                #%% -- -- plot T tdiff
                if no_tdiff_plot == False:
                    stack_arrayT_Tdf = np.zeros((slowT_n,stack_nt))
                    for it in range(stack_nt):  # check points one at a time
                        for slowT_i in range(slowT_n):  # for this station, loop over slownesses
                            num_val = Tcentral_st[slowT_i].data[it]
                            stack_arrayT_Tdf[slowT_i, it] = num_val

                    y, x = np.mgrid[slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta),
                                 slice(ttt[0], ttt[-1] + dt, dt)]

                    fig, ax = plt.subplots(1, figsize=(10,3))
                    c = ax.pcolormesh(x, y, stack_arrayT_Tdf, cmap=plt.cm.coolwarm, vmin= -tdiff_clip, vmax=tdiff_clip)
                    fig.subplots_adjust(bottom=0.2)
                    ax.axis([x.min(), x.max(), y.min(), y.max()])
                    fig.colorbar(c, ax=ax, label='time shift (s)')
                    c = ax.scatter(arrival_time, 0, color='black'  , s=50, alpha=0.75)
                    plt.xlabel('Time (s)')
                    plt.ylabel('Transverse slowness (s/km)')
                    plt.title(f'{ref_phase} Tdiff at {target_slow:.3f} s/km R slowness, {fname1[48:58]}  {fname1[59:69]}  min amp {min_amp:.1f}  cc_thres {cc_thres:.2f}')
                    if no_plots == False:
                        plt.show()
                    fig_index += 1

                #%% -- -- plot T amp
                stack_arrayT_Amp = np.zeros((slowT_n,stack_nt))
                for it in range(stack_nt):  # check points one at a time
                    for slowT_i in range(slowT_n):  # for this station, loop over slownesses
                        num_val = Tcentral_am[slowT_i].data[it]
                        stack_arrayT_Amp[slowT_i, it] = num_val

                y, x = np.mgrid[slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta),
                             slice(ttt[0], ttt[-1] + dt, dt)]

                fig, ax = plt.subplots(1, figsize=(10,3))
                if log_plot == True:
                    c = ax.pcolormesh(x, y, stack_arrayT_Amp - global_max, cmap=plt.cm.gist_rainbow_r, vmin= - log_plot_range, vmax=0)
                else:
                    c = ax.pcolormesh(x, y, stack_arrayT_Amp, cmap=plt.cm.gist_rainbow_r, vmin= 0, vmax=global_max)
                fig.subplots_adjust(bottom=0.2)
                ax.axis([x.min(), x.max(), y.min(), y.max()])
                if log_plot == True:
                    fig.colorbar(c, ax=ax, label='log amplitude')
                else:
                    fig.colorbar(c, ax=ax, label='linear amplitude')
                c = ax.scatter(arrival_time, 0, color='black'  , s=50, alpha=0.75)
                plt.xlabel('Time (s)')
                if NS == True:
                    plt.ylabel('East Slowness (s/km)')
                    plt.title(f'Amp at {target_slow:.3f} s/km N slowness, {fname1[48:58]}  {fname1[59:69]}')
                else:
                    plt.ylabel('Transverse Slowness (s/km)')
                    plt.title(f'Amp at {target_slow:.3f} s/km R slowness, {fname1[48:58]}  {fname1[59:69]}')
                if no_plots == False:
                    plt.show()
                fig_index += 1

    #%% 2-slices-plus-snaps option
    if two_slice_plots == True:
        #%% -- Find slowness arrays for R and T slices
        lowest_Tslow = 1000000
        for slow_i in range(slowT_n):
            if abs(stack_Tslows[slow_i] - T_slow_plot) < lowest_Tslow:
                lowest_Tindex = slow_i
                lowest_Tslow = abs(stack_Tslows[slow_i] - T_slow_plot)

        print(f'{lowest_Tindex:4d} is T slow nearest {T_slow_plot:.3f}, difference is {lowest_Tslow:.3f}')

        lowest_Rslow = 1000000
        for slow_i in range(slowR_n):
            if abs(stack_Rslows[slow_i] - R_slow_plot) < lowest_Rslow:
                lowest_Rindex = slow_i
                lowest_Rslow = abs(stack_Rslows[slow_i] - R_slow_plot)

        print(f'{lowest_Rindex:4d} is R slow nearest {R_slow_plot:.3f}, difference is {lowest_Rslow:.3f}')

        #%% -- Extract and sum tdiff and amp slices from beam matrix
        # Select tdiff stacks at that T slowness for the radial (constant-T) section
        centralR_Dst = Stream()
        for slowR_i in range(slowR_n):
            centralR_Dst += tdiff[slowR_i*slowT_n + lowest_Tindex]

        # Select tdiff stacks at that R slowness for the transverse (constant-R) section
        centralT_Dst = Stream()
        for slowT_i in range(slowT_n):
            centralT_Dst += tdiff[lowest_Rindex*slowT_n + slowT_i]

        # Select amp stacks at that T slowness for the radial (constant-T) section
        centralR_Ast = Stream()
        for slowR_i in range(slowR_n):
            centralR_Ast += amp_ave[slowR_i*slowT_n + lowest_Tindex]

        # Select amp stacks at that R slowness for the transverse (constant-R) section
        centralT_Ast = Stream()
        for slowT_i in range(slowT_n):
            centralT_Ast += amp_ave[lowest_Rindex*slowT_n + slowT_i]

        #%% -- Stack plots
        stack_array = np.zeros((slowR_n,stack_nt))

        for it in range(stack_nt):  # check points one at a time
            for slowR_i in range(slowR_n):  # for this station, loop over slownesses
                num_val = centralR_Dst[slowR_i].data[it]
                stack_array[slowR_i, it] = num_val

        y, x = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                     slice(ttt[0], ttt[-1] + dt, dt)]

        fig, ax = plt.subplots(1, figsize=(10,3))
        print(f'len(x) is {len(x)} and len(y) is {len(y)}')
        print(f'len(stack_Rslows) is {len(stack_Rslows)} and len(ttt) is {len(ttt)}')
        print(f'slowR_n is {slowR_n} and stack_nt is {stack_nt}')
        c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.coolwarm, vmin=-tdiff_clip, vmax=tdiff_clip)
        fig.colorbar(c, ax=ax, label='time lag (s)')
        fig.subplots_adjust(bottom=0.2)
        ax.axis([x.min(), x.max(), y.min(), y.max()])
        c = ax.scatter(arrival_time, event_pred_slo, color='black'  , s=50, alpha=0.75)
        plt.xlabel('Time (s)')
        if NS == True:
            plt.ylabel('N slowness (s/km)')
            plt.title('Time lag at ' + str(T_slow_plot) + ' s/km E slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_NtdiffSection.png')
        else:
            plt.ylabel('R slowness (s/km)')
            plt.title('Time lag at ' + str(T_slow_plot) + ' s/km T slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_RtdiffSection.png')
        if no_plots == False:
            plt.show()

        fig_index += 1

        stack_array = np.zeros((slowT_n,stack_nt))

        for it in range(stack_nt):  # check points one at a time
            for slowT_i in range(slowT_n):  # for this station, loop over slownesses
                num_val = centralT_Dst[slowT_i].data[it]
                stack_array[slowT_i, it] = num_val

        y, x = np.mgrid[slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta),
                     slice(ttt[0], ttt[-1] + dt, dt)]

        fig, ax = plt.subplots(1, figsize=(10,3))
        fig.subplots_adjust(bottom=0.2)
        c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.coolwarm, vmin=-tdiff_clip, vmax=tdiff_clip)
        fig.colorbar(c, ax=ax, label='time lag (s)')
        ax.axis([x.min(), x.max(), y.min(), y.max()])
        c = ax.scatter(arrival_time, 0, color='black'  , s=50, alpha=0.75)
        plt.xlabel('Time (s)')
        if NS == True:
            plt.ylabel('E slowness (s/km)')
            plt.title('Time lag at ' + str(R_slow_plot) + ' s/km N slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_EtdiffSection.png')
        else:
            plt.ylabel('T slowness (s/km)')
            plt.title('Time lag at ' + str(R_slow_plot) + ' s/km R slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_TtdiffSection.png')
        if no_plots == False:
            plt.show()

        fig_index += 1

        stack_array = np.zeros((slowR_n,stack_nt))

        for it in range(stack_nt):  # check points one at a time
            for slowR_i in range(slowR_n):  # for this station, loop over slownesses
                num_val = centralR_Ast[slowR_i].data[it]
                stack_array[slowR_i, it] = num_val

        y, x = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                     slice(ttt[0], ttt[-1] + dt, dt)]

        fig, ax = plt.subplots(1, figsize=(10,3))
        print(f'len(x) is {len(x)} and len(y) is {len(y)}')
        print(f'len(stack_Rslows) is {len(stack_Rslows)} and len(ttt) is {len(ttt)}')
        print(f'slowR_n is {slowR_n} and stack_nt is {stack_nt}')
        c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_rainbow_r, vmin=0)
        fig.colorbar(c, ax=ax, label='linear amplitude')
        fig.subplots_adjust(bottom=0.2)
        ax.axis([x.min(), x.max(), y.min(), y.max()])
        c = ax.scatter(arrival_time, event_pred_slo, color='black'  , s=50, alpha=0.75)
        plt.xlabel('Time (s)')
        if NS == True:
            plt.ylabel('N slowness (s/km)')
            plt.title('Amp at ' + str(T_slow_plot) + ' s/km E slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_NampSection.png')
        else:
            plt.ylabel('R slowness (s/km)')
            plt.title('Amp at ' + str(T_slow_plot) + ' s/km T slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_RampSection.png')
        if no_plots == False:
            plt.show()

        fig_index += 1

        stack_array = np.zeros((slowT_n,stack_nt))

        for it in range(stack_nt):  # check points one at a time
            for slowT_i in range(slowT_n):  # for this station, loop over slownesses
                num_val = centralT_Ast[slowT_i].data[it]
                stack_array[slowT_i, it] = num_val

        y, x = np.mgrid[slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta),
                     slice(ttt[0], ttt[-1] + dt, dt)]

        fig, ax = plt.subplots(1, figsize=(10,3))
        fig.subplots_adjust(bottom=0.2)
        c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_rainbow_r, vmin=0)
        fig.colorbar(c, ax=ax, label='linear amplitude')
        ax.axis([x.min(), x.max(), y.min(), y.max()])
        c = ax.scatter(arrival_time, 0, color='black'  , s=50, alpha=0.75)
        plt.xlabel('Time (s)')
        if NS == True:
            plt.ylabel('E slowness (s/km)')
            plt.title('Amp at ' + str(R_slow_plot) + ' s/km N slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_EampSection.png')
        else:
            plt.ylabel('T slowness (s/km)')
            plt.title('Amp at ' + str(R_slow_plot) + ' s/km R slowness, ' + date_label1 + ' ' + date_label2)
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_TampSection.png')
        if no_plots == False:
            plt.show()

        fig_index += 1

    #%% -- Snap plots
        stack_slice = np.zeros((slowR_n,slowT_n))
        if snaps > 0:
            # check for impossible parameters
            if (start_buff + snaptime) < start_buff:
                print(f'snaptime {start_buff + snaptime:.0f} is earlier than start_buff of {start_buff:.0f}')
                sys.exit(-1)
            last_snap = snaptime + snaps*snap_depth
            if (start_buff + last_snap) > end_buff:
                print(f'last snap {last_snap:.0f} is later than end_buff of {end_buff:.0f}')
                sys.exit(-1)

            for snap_num in range(snaps):
                snap_start = start_buff + snaptime + (snap_num  ) * snap_depth
                snap_end   = start_buff + snaptime + (snap_num+1) * snap_depth
                fig_index += 1
                it_start = int(round((snap_start - start_buff)/dt))
                it_end   = int(round((snap_end   - start_buff)/dt))
                # plt.savefig(save_name + str(start_buff + snap_start) + '_' + str(start_buff + snap_end) + '_Abeam.png')
                for slowR_i in range(slowR_n):  # loop over radial slownesses
                    for slowT_i in range(slowT_n):  # loop over transverse slownesses
                        index = slowR_i*slowT_n + slowT_i
                        num_val = np.nanmean(tdiff[index].data[it_start:it_end])
                        stack_slice[slowR_i, slowT_i] = num_val

                y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                             slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

                fig, ax = plt.subplots(1, figsize=(7,0.8*7))
                c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.coolwarm, vmin=-tdiff_clip, vmax=tdiff_clip)
                ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
                fig.colorbar(c, ax=ax, label = 'time shift (s)')
                c = ax.scatter(pred_Eslo, pred_Nslo, color='black'  , s=50, alpha=0.75)
                c = ax.scatter(        0,         0, color='black' , s=50,  alpha=0.75)
                circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
                ax.add_artist(circle1)  # inner core limit
                circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
                ax.add_artist(circle2)  # outer core limit
                plt.title(f'Tdiff {snap_start:.0f} to {snap_end:.0f}s  {date_label1} {date_label2}  events {eq_num1}&{eq_num2}')
                if NS == True:
                    plt.xlabel('E Slowness (s/km)')
                    plt.ylabel('N Slowness (s/km)')
                else:
                    plt.xlabel('T Slowness (s/km)')
                    plt.ylabel('R Slowness (s/km)')
                plt.savefig(save_name + str(snap_start) + '_' + str(snap_end) + '_Tbeam.png')
                if no_plots == False:
                    plt.show()

            for snap_num in range(snaps):
                snap_start = start_buff + snaptime + (snap_num  ) * snap_depth
                snap_end   = start_buff + snaptime + (snap_num+1) * snap_depth
                fig_index += 1
                it_start = int(round((snap_start - start_buff)/dt))
                it_end   = int(round((snap_end   - start_buff)/dt))
                for slowR_i in range(slowR_n):  # loop over radial slownesses
                    for slowT_i in range(slowT_n):  # loop over transverse slownesses
                        index = slowR_i*slowT_n + slowT_i
                        num_val = np.nanmean(amp_ave[index].data[it_start:it_end])
                        stack_slice[slowR_i, slowT_i] = num_val

                y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                             slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

                fig, ax = plt.subplots(1, figsize=(7,0.8*7))
                c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.gist_rainbow_r)
                ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
                if log_plot == True:
                    fig.colorbar(c, ax=ax, label = 'log amp')
                else:
                    fig.colorbar(c, ax=ax, label = 'linear amp')
                c = ax.scatter(pred_Eslo, pred_Nslo, color='black'  , s=50, alpha=0.75)
                c = ax.scatter(        0,         0, color='black' , s=50,  alpha=0.75)
                circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
                ax.add_artist(circle1)  # inner core limit
                circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
                ax.add_artist(circle2)  # outer core limit
                plt.title(f'Amp {snap_start:.0f} to {snap_end:.0f}s  {date_label1} {date_label2}  events {eq_num1}&{eq_num2}')
                if NS == True:
                    plt.xlabel('E Slowness (s/km)')
                    plt.ylabel('N Slowness (s/km)')
                else:
                    plt.xlabel('T Slowness (s/km)')
                    plt.ylabel('R Slowness (s/km)')
                plt.savefig(save_name + str(snap_start) + '_' + str(snap_end) + '_Abeam.png')
                if no_plots == False:
                    plt.show()

    #%% Wiggly plots
    if wiggly_plots == True:

        #%% -- read wiggle beams
        # Get saved event info, also used to name files
        # date_label = '2018-04-02' # date for filename
        goto = '/Users/vidale/Documents/Research/IC/Pro_files'
        os.chdir(goto)
        fname1 = 'HD' + date_label1 + '_2dstack.mseed'
        fname2 = 'HD' + date_label2 + '_2dstack.mseed'
        st1 = Stream()
        st2 = Stream()
        st1 = read(fname1)
        st2 = read(fname2)

        #%% -- Extract slices to wiggle plot, cumbersome, every slowness along slice is now selected
        #%% -- -- Collect T slowness nearest T_slow
        lowest_Tslow = 1000000
        for slow_i in range(slowT_n):
            if abs(stack_Tslows[slow_i] - T_slow_plot) < lowest_Tslow:
                lowest_Tindex = slow_i
                lowest_Tslow = abs(stack_Tslows[slow_i] - T_slow_plot)

        print(f'{slowT_n} T slownesses, index {lowest_Tindex} is closest to requested plot T slowness {T_slow_plot:.4f}, slowness diff there is {lowest_Tslow:.4f} and slowness is {stack_Tslows[lowest_Tindex]:.4f}')
        # Select only stacks with that slowness for radial plot
        centralR_st1 = Stream()
        centralR_st2 = Stream()
        centralR_amp   = Stream()
        centralR_tdiff = Stream()
        for slowR_i in range(slowR_n):
            ii = slowR_i*slowT_n + lowest_Tindex
            centralR_st1 += st1[ii]
            centralR_st2 += st2[ii]
            centralR_amp   += amp_ave[ii]
            centralR_tdiff += tdiff[ii]

        #%% -- -- Collect R slowness nearest R_slow
        lowest_Rslow = 1000000
        for slow_i in range(slowR_n):
            if abs(stack_Rslows[slow_i] - R_slow_plot) < lowest_Rslow:
                lowest_Rindex = slow_i
                lowest_Rslow = abs(stack_Rslows[slow_i] - R_slow_plot)

        print(f'{slowR_n} R slownesses, index {lowest_Rindex} is closest to requested plot R slowness {R_slow_plot:.4f}, slowness diff there is {lowest_Rslow:.4f} and slowness is {stack_Rslows[lowest_Rindex]:.4f}')

        # Select only stacks with that slowness for transverse plot
        centralT_st1 = Stream()
        centralT_st2 = Stream()
        centralT_amp   = Stream()
        centralT_tdiff = Stream()

        for slowT_i in range(slowT_n):
            ii = lowest_Rindex*slowT_n + slowT_i
            centralT_st1 += st1[ii]
            centralT_st2 += st2[ii]
            centralT_amp   += amp_ave[ii]
            centralT_tdiff += tdiff[ii]

    #%% -- Plot wiggles
        #%% -- -- Compute timing time series
        ttt_dec = (np.arange(len(tdiff[0].data)) * tdiff[0].stats.delta + start_buff) # in units of seconds

        #%% -- -- R amp and tdiff vs time plots with black line for time shift
        scale_plot_wig = wig_scale_fac / (200 * global_max)
        scale_plot_tdiff = tdiff_scale_fac / 500.
        if log_plot == True:
            scale_plot_wig /= 30  # not quite sure why this renormalization works
            # scale_plot_tdiff = plot_scale_fac / 500.
        fig_index = 116
        plt.figure(fig_index,figsize=(30,10))
        plt.xlim(start_buff,end_buff)
        plt.ylim(stack_Rslows[0], stack_Rslows[-1])
        for slowR_i in range(slowR_n):  # loop over radial slownesses
            dist_offset = stack_Rslows[slowR_i] # trying for approx degrees
            ttt1 = (np.arange(len(centralR_st1[slowR_i].data)) * centralR_st1[slowR_i].stats.delta
              + (centralR_st1[slowR_i].stats.starttime - t1))
            plt.plot(ttt1, ((centralR_st1[slowR_i].data - np.median(centralR_st1[slowR_i].data)) * scale_plot_wig) + dist_offset, color = 'green')
            plt.plot(ttt1, ((centralR_st2[slowR_i].data - np.median(centralR_st2[slowR_i].data)) * scale_plot_wig) + dist_offset, color = 'red')
            if turn_off_black == 0:
                plt.plot(ttt1,     (centralR_st1[slowR_i].data)*0.0 + dist_offset, color = 'gray') # reference lines
                plt.plot(ttt_dec, (centralR_tdiff[slowR_i].data) * scale_plot_tdiff + dist_offset, color = 'black')

        plt.xlabel('Time (s)')
        if NS == True:
            plt.ylabel('N Slowness (s/km)')
            plt.title(date_label1 + '  ' + date_label2 + '  ' + ' seismograms ' + str(T_slow_plot) + ' E slowness, green is event1, red is event2')
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_N_pro_wig.png')
        else:
            plt.ylabel('R Slowness (s/km)')
            plt.title(date_label1 + '  ' + date_label2 + '  ' + ' seismograms ' + str(T_slow_plot) + ' T slowness, green is event1, red is event2')
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_R_pro_wig.png')
        #%% -- -- T amp and tdiff vs time plots with black line for time shift
        fig_index = 117
        plt.figure(fig_index,figsize=(30,10))
        plt.xlim(start_buff,end_buff)
        plt.ylim(stack_Tslows[0], stack_Tslows[-1])

        for slowT_i in range(slowT_n):  # loop over transverse slownesses
            dist_offset = stack_Tslows[slowT_i] # trying for approx degrees
            ttt2 = (np.arange(len(centralT_st1[slowT_i].data)) * centralT_st1[slowT_i].stats.delta
              + (centralT_st1[slowT_i].stats.starttime - t1))
            plt.plot(ttt2, ((centralT_st1[slowT_i].data - np.median(centralT_st1[slowT_i].data)) * scale_plot_wig) + dist_offset, color = 'green')
            plt.plot(ttt2, ((centralT_st2[slowT_i].data - np.median(centralT_st2[slowT_i].data)) * scale_plot_wig) + dist_offset, color = 'red')
            if turn_off_black == 0:
                plt.plot(ttt2,     (centralT_st1[slowT_i].data)*0.0 + dist_offset, color = 'gray') # reference lines
                plt.plot(ttt_dec, (centralT_tdiff[slowT_i].data) * scale_plot_tdiff + dist_offset, color = 'black')
        plt.xlabel('Time (s)')
        if NS == True:
            plt.ylabel('E Slowness (s/km)')
            plt.title(date_label1 + '  ' + date_label2 + '  ' + ' seismograms ' + str(R_slow_plot) + ' N slowness, green is event1, red is event2')
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_E_pro_wig.png')
        else:
            plt.ylabel('T Slowness (s/km)')
            plt.title(date_label1 + '  ' + date_label2 + '  ' + ' seismograms ' + str(R_slow_plot) + ' R slowness, green is event1, red is event2')
            plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_T_pro_wig.png')

    #%% Beam sum plots
    if beam_sums == True:
    #%% -- R-T tdiff amp-normed
        stack_slice = np.zeros((slowR_n,slowT_n))

        if start_beam == 0 and end_beam == 0:
            full_beam = 1
            print('Full beam is specified.')
        else:  # beam just part of stack volume
            full_beam = 0
            start_index = int((start_beam - start_buff) / dt)
            end_index   = int((end_beam   - start_buff) / dt)
            print(f'Beam is {start_beam:.4f} to {end_beam:.4f}s, out of {start_buff:.4f} to {end_buff:.4f}s, dt is {dt:.4f}s, and indices are {start_index} {end_index}')

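        # Each (R,T) cell of the beam sum below is an amplitude-weighted mean time
        # shift, sum(tdiff * amp_ave_thres) / sum(amp_ave_thres), computed with
        # nansum so samples masked to NaN by the cc/min_amp thresholds are skipped
        # and well-correlated, high-amplitude arrivals dominate each cell.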
        for slowR_i in range(slowR_n):  # loop over radial slownesses
            for slowT_i in range(slowT_n):  # loop over transverse slownesses
                index = slowR_i*slowT_n + slowT_i
                if full_beam == 1: # using elementwise multiplication, amplitude weighted
                    num_val = np.nansum(np.multiply(tdiff[index].data, amp_ave_thres[index].data))/np.nansum(amp_ave_thres[index].data)
                else:
                    num_val = np.nansum(np.multiply(tdiff[index].data[start_index:end_index], amp_ave_thres[index].data[start_index:end_index]
                                                     ))/np.nansum(amp_ave_thres[index].data[start_index:end_index])
                stack_slice[slowR_i, slowT_i] = num_val

        y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                     slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

        fig, ax = plt.subplots(1, figsize=(7,0.8*7*(slowR_n/slowT_n)))  # try to make correct aspect ratio plot
        c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.coolwarm, vmin = -tdiff_clip, vmax = tdiff_clip)
        fig.colorbar(c, ax=ax, label='time shift (s)')
        ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
        circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
        ax.add_artist(circle1)
        circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
        ax.add_artist(circle2)  #outer core limit

        c = ax.scatter(pred_Eslo, pred_Nslo, color='black'  , s=50, alpha=0.75)
        c = ax.scatter(        0,         0, color='black' , s=50,  alpha=0.75)

        if NS == True:
            plt.ylabel('N Slowness (s/km)')
            plt.xlabel('E Slowness (s/km)')
        else:
            plt.ylabel('R Slowness (s/km)')
            plt.xlabel('T Slowness (s/km)')
        plt.title(f'{date_label1} {date_label2} {start_buff:.0f} to {end_buff:.0f} time shift')
        plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_Tbeam.png')
        if no_plots == False:
            plt.show()

    #%% -- R-T amplitude averaged over time window
        stack_slice = np.zeros((slowR_n,slowT_n))
        for slowR_i in range(slowR_n):  # loop over radial slownesses
            for slowT_i in range(slowT_n):  # loop over transverse slownesses
                index = slowR_i*slowT_n + slowT_i
                if full_beam == 1:
                    num_val = np.nanmean(amp_ave[index].data)
                else:
                    num_val = np.nanmean(amp_ave[index].data[start_index:end_index])
                stack_slice[slowR_i, slowT_i] = num_val

        y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                     slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

        fig, ax = plt.subplots(1, figsize=(7,0.8*7*(slowR_n/slowT_n)))
        smax = np.max(stack_slice)
        smin = np.min(stack_slice)
        if log_plot == True:
            if (smax - smin) < log_plot_range:  # use full color scale even if range is less than specified
                log_plot_range = smax - smin
            c = ax.pcolormesh(x1, y1, stack_slice - smax, cmap=plt.cm.gist_rainbow_r, vmin= - log_plot_range, vmax=0)
        else:
            c = ax.pcolormesh(x1, y1, stack_slice/smax, cmap=plt.cm.gist_rainbow_r, vmin = 0)
        if log_plot == True:
            fig.colorbar(c, ax=ax, label='log amplitude')
        else:
            fig.colorbar(c, ax=ax, label='linear amplitude')
        ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
        circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
        ax.add_artist(circle1)  #inner core limit
        circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
        ax.add_artist(circle2)  #outer core limit

        c = ax.scatter(pred_Eslo, pred_Nslo, color='black'  , s=50, alpha=0.75)
        c = ax.scatter(        0,         0, color='black' , s=50,  alpha=0.75)

        if NS == True:
            plt.xlabel('E Slowness (s/km)')
            plt.ylabel('N Slowness (s/km)')
        else:
            plt.xlabel('T Slowness (s/km)')
            plt.ylabel('R Slowness (s/km)')
        plt.title(f'{date_label1} {date_label2} {start_buff:.0f} to {end_buff:.0f} beam amp')
        plt.savefig(save_name + str(start_buff) + '_' + str(end_buff) + '_Abeam.png')
        if no_plots == False:
            plt.show()

    #  Save processed files
#    fname = 'HD' + date_label + '_slice.mseed'
#    stack.write(fname,format = 'MSEED')

    elapsed_time_wc = time.time() - start_time_wc
    print(f'Pro7 took {elapsed_time_wc:.1f} seconds')
    os.system('say "Done"')
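A hedged usage sketch for pro7_pair_scan. The event numbers, slowness window, and time window below are placeholders, and the function expects the hard-wired EvLocs event files and the pre-computed HD*_tshift/amp_ave/cc.mseed beams from earlier processing steps to already exist.

# Hypothetical call; all numeric values are placeholders
pro7_pair_scan(eq_num1=634, eq_num2=635,
               ref_phase='PKIKP', ARRAY=1, NS=True,
               slow_delta=0.0025,
               slowR_lo=-0.04, slowR_hi=0.04, slowT_lo=-0.04, slowT_hi=0.04,
               start_buff=-20, end_buff=80,
               cc_thres=0.8, min_amp=0.2, tdiff_clip=1.0,
               auto_slice=False, two_slice_plots=True, beam_sums=True,
               snaptime=0, snaps=4, snap_depth=20,
               R_slow_plot=0.0, T_slow_plot=0.0)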
Example #16
0
def sac2hdf5(src_folder,
             basenames,
             channels,
             dest_h5_file,
             tt_model_id='iasp91'):
    """
    Convert collection of SAC files from a folder into a single HDF5 stream file.

    :param src_folder: Path to folder containing SAC files
    :type src_folder: str or Path
    :param basenames: List of base filenames (file name excluding extension) to load.
    :type basenames: list of str
    :param channels: List of channels to load. For each base filename in basenames,
        there is expected to be a file with each channel as the filename extension.
    :type channels: List of str
    :param dest_h5_file: Path to output file. Will be created, or overwritten if
        already exists.
    :type dest_h5_file: str or Path
    :param tt_model_id: Which travel time earth model to use for synthesizing
        trace metadata. Must be known to obspy.taup.TauPyModel
    :type tt_model_id: str
    :return: None
    """
    tt_model = TauPyModel(tt_model_id)
    traces = []
    for basename, channel in itertools.product(basenames, channels):
        fname = os.path.join(src_folder, '.'.join([basename, channel]))
        channel_stream = obspy.read(fname, 'sac')
        tr = channel_stream[0]
        event_depth_km = tr.stats.sac['evdp']
        dist_deg = tr.stats.sac['dist'] / KM_PER_DEG
        arrivals = tt_model.get_travel_times(event_depth_km, dist_deg, ('P', ))
        arrival = arrivals[0]
        inc = arrival.incident_angle
        slowness = arrival.ray_param_sec_degree
        src_dic = tr.stats.sac
        sac_tr = SACTrace(nzyear=src_dic['nzyear'],
                          nzjday=src_dic['nzjday'],
                          nzhour=src_dic['nzhour'],
                          nzmin=src_dic['nzmin'],
                          nzsec=src_dic['nzsec'],
                          nzmsec=src_dic['nzmsec'])
        onset = sac_tr.reftime
        if 'nevid' in src_dic:
            event_id = src_dic['nevid']
        else:
            event_id = basename
        # end if
        stats = {
            'distance': dist_deg,
            'back_azimuth': src_dic['baz'],
            'inclination': inc,
            'onset': onset,
            'slowness': slowness,
            'phase': 'P',
            'tt_model': tt_model_id,
            'event_id': event_id
        }
        tr.stats.update(stats)
        traces.append(tr)
    # end for

    stream_all = obspy.Stream(traces)
    stream_all.write(dest_h5_file, 'H5')
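
# Usage sketch (added; not part of the original example): hypothetical folder
# layout in which each station has files such as STA01.BHZ, STA01.BHR, STA01.BHT.
if __name__ == '__main__':
    sac2hdf5('/path/to/sac_folder',
             basenames=['STA01', 'STA02'],
             channels=['BHZ', 'BHR', 'BHT'],
             dest_h5_file='/path/to/event_data.h5',
             tt_model_id='iasp91')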
Example #17
def f_vespagram_theoretical_arrivals(st, origin, smin, smax, ssteps, baz,
                                     winlen):
    '''
    Plot the F-statistic vespagram for a seismic array over a given slowness
    range, for a single backazimuth. Theoretical arrival times and slownesses
    for each phase are plotted on top.

    The F-statistic of the beam is calculated over a sliding time window of
    length winlen around each time step for each slowness, and is plotted as a
    function of time (in s) and slowness (in s/km).

    Parameters
    ----------
    st : ObsPy Stream object
        Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
    origin : ObsPy Origin object
        Origin of the event in question. Should contain the origin time of the earthquake and, if necessary, the depth and location.
    smin : float
        Minimum magnitude of slowness vector, in s / km
    smax : float
        Maximum magnitude of slowness vector, in s / km
    ssteps : int
        Integer number of steps between smin and smax for which to calculate the vespagram
    baz : float
        Backazimuth of slowness vector (i.e. angle from North back to the epicentre of the event)
    winlen : int
        Length of Hann window over which to calculate the F-statistic.
    '''

    starttime = st[0].stats.starttime
    tt_model = TauPyModel()

    # Arrivals are calculated from the information in origin.
    delta = locations2degrees(
        origin.latitude, origin.longitude, st[0].stats.sac.stla,
        st[0].stats.sac.stlo)  # Distance in degrees from source to receiver
    arrivals = tt_model.get_travel_times(origin.depth / 1000., delta)

    arrival_names = [arrival.name for arrival in arrivals]
    arrival_times = [
        origin.time + arrival.time - starttime for arrival in arrivals
    ]
    arrival_slowness = [
        arrival.ray_param_sec_degree / G_KM_DEG for arrival in arrivals
    ]

    plt.figure(figsize=(16, 8))

    vespagram = np.array(
        [f_vespa(st, s, baz, winlen) for s in np.linspace(smin, smax, ssteps)])
    label = 'F'
    timestring = str(st[0].stats.starttime.datetime)
    title = timestring + ": " + label + " Vespagram"

    plt.contourf(st[0].times(), np.linspace(smin, smax, ssteps),
                 vespagram[:, :])

    cb = plt.colorbar()
    cb.set_label(label)

    # Plot predicted arrivals
    plt.scatter(arrival_times, arrival_slowness, c='cyan', s=200, marker='+')

    plt.xlabel("Time (s)")
    plt.xlim(min(st[0].times()), max(st[0].times()))
    plt.ylim(smin, smax)

    # Thanks, Stack Overflow: http://stackoverflow.com/questions/5147112/matplotlib-how-to-put-individual-tags-for-a-scatter-plot
    for label, x, y in zip(arrival_names, arrival_times, arrival_slowness):
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(-20, 20),
                     textcoords='offset points',
                     ha='right',
                     va='bottom',
                     bbox=dict(boxstyle='round,pad=0.5',
                               fc='yellow',
                               alpha=0.5),
                     arrowprops=dict(arrowstyle='->',
                                     connectionstyle='arc3,rad=0'))

    plt.ylabel("Slowness (s / km)")
    plt.title(title)
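
# Usage sketch (added; not part of the original example): hypothetical file
# paths and event parameters. Assumes f_vespa and G_KM_DEG are defined elsewhere
# in this module, as the function above already requires.
if __name__ == '__main__':
    from obspy import read, UTCDateTime
    from obspy.core.event import Origin

    st = read('/path/to/array_data/*.SAC')  # SAC seismograms for the array
    origin = Origin(time=UTCDateTime('2016-05-28T09:47:00'),
                    latitude=-56.241, longitude=-26.935,
                    depth=78000.0)  # depth in metres; the function divides by 1000
    f_vespagram_theoretical_arrivals(st, origin, smin=0.0, smax=0.1,
                                     ssteps=100, baz=240.0, winlen=50)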
Example #18
def input_chen_tele_body(tensor_info, data_prop):
    """We write some text files, which are based on teleseismic body wave data,
    as inputs for Chen's scripts.
    
    :param tensor_info: dictionary with moment tensor information
    :param data_prop: dictionary with properties of waveform data
    :type tensor_info: dict
    :type data_prop: dict
    
    .. warning::
        
        Make sure the filters of teleseismic data agree with the values in
        sampling_filter.json!
    """
    if not os.path.isfile('tele_waves.json'):
        return
    traces_info = json.load(open('tele_waves.json'))
    date_origin = tensor_info['date_origin']
    dt = traces_info[0]['dt']
    dt = round(dt, 1)
    filtro = data_prop['tele_filter']
    low_freq = filtro['low_freq']
    high_freq = filtro['high_freq']

    with open('filtro_tele', 'w') as outfile:
        outfile.write('Corners: {} {}\n'.format(low_freq, high_freq))
        outfile.write('dt: {}'.format(dt))

    nsta = len(traces_info)
    model = TauPyModel(model="ak135f_no_mud")
    depth = tensor_info['depth']

    string = '{0:2d}   FAR GDSN {1:>6} {1:>6}BHZ.DAT {2:5.2f} {3:6.2f} '\
        '{4:5.2f} {5:5.2f} {6:5.2f} {7} 0  0.0  0.0 {8}  1 0\n'
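    # Added comment: the lambdas below convert a horizontal slowness p (in s/deg)
    # to an apparent incidence angle, apparently assuming a near-surface velocity
    # of about 3.6 km/s: sin(i) = p * 3.6 / 111.12, with 111.12 km per degree.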
    sin_fun = lambda p: p * 3.6 / 111.12
    angle_fun = lambda p:\
    np.arctan2(sin_fun(p), np.sqrt(1 - sin_fun(p)**2)) * 180.0 / np.pi
    string_fun1 = lambda i, name, dist, az, lat, lon, p_slowness:\
    string.format(
        i, name, dist, az, lat, lon, angle_fun(p_slowness), 1.0, 0)
    string_fun2 = lambda i, name, dist, az, lat, lon, s_slowness:\
    string.format(
        i, name, dist, az, lat, lon, angle_fun(s_slowness), 4.0, 2)

    with open('Readlp.das', 'w') as outfile:
        outfile.write('30 30 30 0 0 0 0 0 0 1.1e+20\n')
        outfile.write('3 10 {}\n{}{}{}{}{}{}.{}\n{}\n'.format(
            dt, date_origin.year, date_origin.month, date_origin.day,
            date_origin.hour, date_origin.minute, date_origin.second,
            date_origin.microsecond, nsta))
        i = 0
        for file in traces_info:  #header in headers:
            name = file['name']
            comp = file['component']
            lat, lon = file['location']
            dist = file['distance']
            az = file['azimuth']
            arrivals = mng.theoretic_arrivals(model, dist, depth)
            p_slowness = arrivals['p_slowness']
            s_slowness = arrivals['s_slowness']
            if comp == 'BHZ':
                outfile.write(
                    string_fun1(i + 1, name, dist, az, lat, lon, p_slowness))
            else:
                outfile.write(
                    string_fun2(i + 1, name, dist, az, lat, lon, s_slowness))
            i = i + 1

    with open('Wave.tele', 'w') as file1, open('Obser.tele', 'w') as file2:
        write_files_wavelet_observed(file1, file2, dt, data_prop, traces_info)


    #
    # instrumental response common to all body waves
    #
    string2 = '\n3\n' + '0. 0.\n' * 3 + '4\n-6.17E-03  6.17E-03\n'\
              '-6.17E-03 -6.17E-03\n-39.18    49.12\n-39.18   '\
              '-49.12\n3948\n'
    with open('instrumental_response', 'w') as outfile:
        outfile.write('{}\n'.format(nsta))
        outfile.write(string2 * len(traces_info))

    write_wavelet_freqs(dt, 'Wavelets_tele_body')

    with open('Weight', 'w') as outfile:
        for info in traces_info:
            sta = info['name']
            comp = info['component']
            weight = info['trace_weight']
            outfile.write('{} {} {}\n'.format(weight, sta, comp))
    return 'tele_body'
Example #19
from obspy.taup import TauPyModel
from numpy import genfromtxt,arange,meshgrid,ones,zeros,save,array
from pyproj import Geod
from obspy.geodetics import kilometer2degrees
from scipy.linalg import norm
from obspy import read,UTCDateTime

model = TauPyModel(model="spica")
lonlat=genfromtxt('/Users/dmelgar/Slip_inv/puebla/data/station_info/sm.gflist',usecols=[1,2])
sta=genfromtxt('/Users/dmelgar/Slip_inv/puebla/data/station_info/sm.gflist',usecols=[0],dtype='S')

# coarse settings
#time_epi=[UTCDateTime('2017-09-19T18:14:37'),UTCDateTime('2017-09-19T18:14:38'),UTCDateTime('2017-09-19T18:14:39'),UTCDateTime('2017-09-19T18:14:40'),UTCDateTime('2017-09-19T18:14:41'),UTCDateTime('2017-09-19T18:14:42')]
#xsource=arange(-99,-98.3,0.05)
#ysource=arange(18.2,18.6,0.05)
#zsource=arange(30,60,2)

# fine settings
#time_epi=[UTCDateTime('2017-09-19T18:14:35.5'),UTCDateTime('2017-09-19T18:14:36'),UTCDateTime('2017-09-19T18:14:36.5'),UTCDateTime('2017-09-19T18:14:37'),UTCDateTime('2017-09-19T18:14:37.5'),UTCDateTime('2017-09-19T18:14:38')]
#xsource=arange(-98.7,-98.6,0.01)
#ysource=arange(18.15,18.35,0.01)
#zsource=arange(55,58,1)

#time_epi=[UTCDateTime('2017-09-19T18:14:32'),UTCDateTime('2017-09-19T18:14:33'),UTCDateTime('2017-09-19T18:14:34'),UTCDateTime('2017-09-19T18:14:35'),UTCDateTime('2017-09-19T18:14:36'),UTCDateTime('2017-09-19T18:14:37')]
#xsource=array([-98.65])
#ysource=array([18.22])
#zsource=arange(55,70,2)

time_epi=[UTCDateTime('2017-09-19T18:14:35.5'),UTCDateTime('2017-09-19T18:14:36'),UTCDateTime('2017-09-19T18:14:36.5'),UTCDateTime('2017-09-19T18:14:37'),UTCDateTime('2017-09-19T18:14:37.5'),UTCDateTime('2017-09-19T18:14:38')]
xsource=arange(-98.7,-98.6,0.01)
ysource=arange(18.15,18.35,0.01)
Example #20
def rfstats(obj=None,
            event=None,
            station=None,
            phase='P',
            dist_range='default',
            tt_model='iasp91',
            pp_depth=None,
            pp_phase=None,
            model='iasp91'):
    """
    Calculate ray specific values like slowness for given event and station.

    :param obj: `~obspy.core.trace.Stats` object with event and/or station
        attributes. Can be None if both event and station are given.
        It is possible to specify a stream object, too. Then, rfstats will be
        called for each Trace.stats object and traces outside dist_range will
        be discarded.
    :param event: ObsPy `~obspy.core.event.event.Event` object
    :param station: station object with attributes latitude, longitude and
        elevation
    :param phase: string with phase. Usually this will be 'P' or
        'S' for P and S receiver functions, respectively.
    :type dist_range: tuple of length 2
    :param dist_range: if the epicentral distance of the event is not in this
        interval, None is returned by this function,\n
        if phase == 'P' defaults to (30, 90),\n
        if phase == 'S' defaults to (50, 85)
    :param tt_model: model for travel time calculation.
        (see the `obspy.taup` module, default: iasp91)
    :param pp_depth: Depth for piercing point calculation
        (in km, default: None -> No calculation)
    :param pp_phase: Phase for pp calculation (default: 'S' for P-receiver
        function and 'P' for S-receiver function)
    :param model: Path to model file for pp calculation
        (see `.SimpleModel`, default: iasp91)
    :return: `~obspy.core.trace.Stats` object with event and station
        attributes, distance, back_azimuth, inclination, onset and
        slowness or None if epicentral distance is not in the given interval.
        Stream instance if stream was specified instead of stats.
    """
    if isinstance(obj, (Stream, RFStream)):
        stream = obj
        kwargs = {
            'event': event,
            'station': station,
            'phase': phase,
            'dist_range': dist_range,
            'tt_model': tt_model,
            'pp_depth': pp_depth,
            'pp_phase': pp_phase,
            'model': model
        }
        traces = []
        for tr in stream:
            if rfstats(tr.stats, **kwargs) is not None:
                traces.append(tr)
        stream.traces = traces
        return stream
    if dist_range == 'default' and phase.upper() in 'PS':
        dist_range = (30, 90) if phase.upper() == 'P' else (50, 85)
    stats = AttribDict({}) if obj is None else obj
    if event is not None and station is not None:
        stats.update(obj2stats(event=event, station=station))
    dist, baz, _ = gps2dist_azimuth(stats.station_latitude,
                                    stats.station_longitude,
                                    stats.event_latitude,
                                    stats.event_longitude)
    dist = dist / 1000 / DEG2KM
    if dist_range and not dist_range[0] <= dist <= dist_range[1]:
        return
    tt_model = TauPyModel(model=tt_model)
    arrivals = tt_model.get_travel_times(stats.event_depth, dist, (phase, ))
    if len(arrivals) == 0:
        raise Exception('TauPy does not return phase %s at distance %s' %
                        (phase, dist))
    if len(arrivals) > 1:
        msg = ('TauPy returns more than one arrival for phase %s at '
               'distance %s -> take first arrival')
        warnings.warn(msg % (phase, dist))
    arrival = arrivals[0]
    onset = stats.event_time + arrival.time
    inc = arrival.incident_angle
    slowness = arrival.ray_param_sec_degree
    stats.update({
        'distance': dist,
        'back_azimuth': baz,
        'inclination': inc,
        'onset': onset,
        'slowness': slowness,
        'phase': phase
    })
    if pp_depth is not None:
        model = load_model(model)
        if pp_phase is None:
            pp_phase = 'S' if phase.upper().endswith('P') else 'P'
        model.ppoint(stats, pp_depth, phase=pp_phase)
    return stats
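
# Usage sketch (added; not part of the original example): hypothetical event,
# inventory and waveform paths. rfstats fills the trace stats in place and drops
# traces outside the default distance range for P receiver functions.
if __name__ == '__main__':
    from obspy import read, read_events, read_inventory

    stream = read('/path/to/station_waveforms/*.SAC')
    event = read_events('/path/to/event.xml')[0]
    station = read_inventory('/path/to/stations.xml')[0][0]  # first station of first network
    stream = rfstats(stream, event=event, station=station, phase='P')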
Example #21
import argparse
import datetime
import io
import math
import numpy as np
from obspy.clients.fdsn import Client as FDSN_Client
from obspy.taup import TauPyModel
from obspy.io.quakeml.core import Unpickler
import pycurl
import pyproj
import xml.etree.ElementTree as ET

# Set up objects to use imported modules
client = FDSN_Client("https://service.geonet.org.nz")
spherical_velocity_model = TauPyModel(model="iasp91")
quakeml_reader = Unpickler()

# Set up pyproj coordinate system objects
proj_wgs84_geog = pyproj.Proj(init="epsg:4326")
proj_wgs84_geod = pyproj.Proj(init="epsg:4978")


def parse_files(
        arrival_time_file=None,
        eventid_file=None,
        test_origins=None,
        network_file=None,
        velocity_model=None,
        grid_parameters=None,
        grid_file=None,
Example #22
def pro5stack(eq_file, plot_scale_fac = 0.05, slowR_lo = -0.1, slowR_hi = 0.1,
			  slow_delta = 0.0005, start_buff = -50, end_buff = 50,
			  ref_lat = 36.3, ref_lon = 138.5, envelope = 1, plot_dyn_range = 1000,
			  log_plot = 1, norm = 1, global_norm_plot = 1, color_plot = 1, fig_index = 401, ARRAY = 0):

#%% Import functions
	import obspy
	import obspy.signal
	from obspy import UTCDateTime
	from obspy import Stream, Trace
	from obspy import read
	from obspy.geodetics import gps2dist_azimuth
	import numpy as np
	import os
	from obspy.taup import TauPyModel
	import obspy.signal as sign
	import matplotlib.pyplot as plt
	from matplotlib.colors import LogNorm
	model = TauPyModel(model='iasp91')
	from scipy.signal import hilbert
	import math
	import time

#	import sys # don't show any warnings
#	import warnings

	print('Running pro5a_stack')

#%% Get saved event info, also used to name files
	start_time_wc = time.time()

	if ARRAY == 0:
		file = open(eq_file, 'r')
	elif ARRAY == 1:
		file = open('EvLocs/' + eq_file, 'r')
	lines=file.readlines()
	split_line = lines[0].split()
#			ids.append(split_line[0])  ignore label for now
	t           = UTCDateTime(split_line[1])
	date_label  = split_line[1][0:10]
	ev_lat      = float(      split_line[2])
	ev_lon      = float(      split_line[3])
	ev_depth    = float(      split_line[4])

	#if not sys.warnoptions:
	#    warnings.simplefilter("ignore")

#%% Get station location file
	if ARRAY == 0: # Hinet set and center
		sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/hinet_sta.txt'
		ref_lat = 36.3
		ref_lon = 138.5
	elif ARRAY == 1: # LASA set and center
		sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/LASA_sta.txt'
		ref_lat = 46.69
		ref_lon = -106.22
	else:         # NORSAR set and center if 2
		sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/NORSAR_sta.txt'
		ref_lat = 61
		ref_lon = 11
	with open(sta_file, 'r') as file:
		lines = file.readlines()
	print(str(len(lines)) + ' stations read from ' + sta_file)
	# Load station coords into arrays
	station_index = range(len(lines))
	st_names = []
	st_lats  = []
	st_lons  = []
	for ii in station_index:
		line = lines[ii]
		split_line = line.split()
		st_names.append(split_line[0])
		st_lats.append( split_line[1])
		st_lons.append( split_line[2])

#%% Name file, read data
	# date_label = '2018-04-02' # date for filename
	if ARRAY == 0:
		fname = 'HD' + date_label + 'sel.mseed'
	elif ARRAY == 1:
		fname = 'Pro_Files/HD' + date_label + 'sel.mseed'
	st = Stream()
	print('reading ' + fname)
	st = read(fname)
	print('Read in: ' + str(len(st)) + ' traces')
	nt = len(st[0].data)
	dt = st[0].stats.delta
	print('First trace has : ' + str(nt) + ' time pts, time sampling of '
		  + str(dt) + ' and thus duration of ' + str((nt-1)*dt))

#%% Build Stack arrays
	stack = Stream()
	tr = Trace()
	tr.stats.delta = dt
	tr.stats.network = 'stack'
	tr.stats.channel = 'BHZ'
	slow_n = int(1 + (slowR_hi - slowR_lo)/slow_delta)  # number of slownesses
	stack_nt = int(1 + ((end_buff - start_buff)/dt))  # number of time points
	# In English, stack_slows = range(slow_n) * slow_delta + slowR_lo
	a1 = range(slow_n)
	stack_slows = [(x * slow_delta + slowR_lo) for x in a1]
	print(str(slow_n) + ' slownesses.')
	tr.stats.starttime = t + start_buff
	tr.data = np.zeros(stack_nt)
	done = 0
	for stack_one in stack_slows:
		tr1 = tr.copy()
		tr1.stats.station = str(int(done))
		stack.extend([tr1])
		done += 1
	#	stack.append([tr])
	#	stack += tr

	#  Only need to compute ref location to event distance once
	ref_distance = gps2dist_azimuth(ev_lat,ev_lon,ref_lat,ref_lon)

#%% Select traces by distance, window and adjust start time to align picked times
	done = 0
	for tr in st: # traces one by one, find lat-lon by searching entire inventory.  Inefficient but cheap
		for ii in station_index:
			if ARRAY == 0:  # for hi-net, have to chop off last letter, always 'h'
				this_name = st_names[ii]
				this_name_truc = this_name[0:5]
				name_truc_cap  = this_name_truc.upper()
			elif ARRAY == 1:
				name_truc_cap = st_names[ii]
			if (tr.stats.station == name_truc_cap): # find station in inventory
				if norm == 1:
					tr.normalize()
#					tr.normalize(norm= -len(st)) # mystery command or error
				stalat = float(st_lats[ii])
				stalon = float(st_lons[ii]) # look up lat & lon again to find distance
				distance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon) # Get traveltimes again, hard to store
				tr.stats.distance=distance[0] # distance in m
				del_dist = (ref_distance[0] - distance[0])/(1000) # in km
				# ALSO NEEDS distance station - hypocenter calculation
				#isolate components of distance in radial and transverse directions, ref_distR & ref_distT
				# FIX ref_distR = distance*cos(azi-backazi)
				# FIX ref_distT = distance*sin(azi-backazi)
	#			for(k=0;k<nslow;k++){
	#				slow = 110.*(LOWSLOW + k*DELTASLOW);
				for slow_i in range(slow_n):  # for this station, loop over slownesses
					time_lag = -del_dist * stack_slows[slow_i]  # time shift due to slowness, flipped to match 2D
#					start_offset = tr.stats.starttime - t
#					time_correction = (start_buff - (start_offset + time_lag))/dt
					time_correction = ((t-tr.stats.starttime) + (time_lag + start_buff))/dt
	#				print('Time lag ' + str(time_lag) + ' for slowness ' + str(stack_slows[slow_i]) + ' and distance ' + str(del_dist) + ' time sample correction is ' + str(time_correction))
					for it in range(stack_nt):  # check points one at a time
						it_in = int(it + time_correction)
						if it_in >= 0 and it_in < nt - 1: # does data lie within seismogram?
							stack[slow_i].data[it] += tr[it_in]
				done += 1
				if done%50 == 0:
					print('Done stacking ' + str(done) + ' out of ' + str(len(st)) + ' stations.')
#%% Plot traces
	global_max = 0
	for slow_i in range(slow_n): # find global max, and if requested, take envelope
		if len(stack[slow_i].data) == 0:
				print('%d data has zero length ' % (slow_i))
		if envelope == 1 or color_plot == 1:
			stack[slow_i].data = np.abs(hilbert(stack[slow_i].data))
		local_max = max(abs(stack[slow_i].data))
		if local_max > global_max:
			global_max = local_max
	if global_max <= 0:
		print('global_max ' + str(global_max) + ' slow_n ' + str(slow_n))

	# create time axis (x-axis), use of slow_i here is arbitrary, oops
	ttt = (np.arange(len(stack[slow_i].data)) * stack[slow_i].stats.delta +
		 (stack[slow_i].stats.starttime - t)) # in units of seconds

	# Plotting
	if color_plot == 1: # 2D color plot
		stack_array = np.zeros((slow_n,stack_nt))

	#	stack_array = np.random.rand(int(slow_n),int(stack_nt))  # test with random numbers
		min_allowed = global_max/plot_dyn_range
		if log_plot == 1:
			for it in range(stack_nt):  # check points one at a time
				for slow_i in range(slow_n):  # for this station, loop over slownesses
					num_val = stack[slow_i].data[it]
					if num_val < min_allowed:
						num_val = min_allowed
					stack_array[slow_i, it] = math.log10(num_val) - math.log10(min_allowed)
		else:
			for it in range(stack_nt):  # check points one at a time
				for slow_i in range(slow_n):  # for this station, loop over slownesses
					stack_array[slow_i, it] = stack[slow_i].data[it]/global_max
		y, x = np.mgrid[slice(stack_slows[0], stack_slows[-1] + slow_delta, slow_delta),
					 slice(ttt[0], ttt[-1] + dt, dt)]  # make underlying x-y grid for plot
	#	y, x = np.mgrid[ stack_slows , time ]  # make underlying x-y grid for plot
		plt.close(fig_index)

		fig, ax = plt.subplots(1, figsize=(9,2))
		fig.subplots_adjust(bottom=0.3)
#		c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_yarg)
#		c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_rainbow_r)
		c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.binary)
		ax.axis([x.min(), x.max(), y.min(), y.max()])
		fig.colorbar(c, ax=ax)
		plt.figure(fig_index,figsize=(6,8))
		plt.close(fig_index)
	else: # line plot
		for slow_i in range(slow_n):
			dist_offset = stack_slows[slow_i] # in units of slowness
			if global_norm_plot != 1:
				plt.plot(ttt, stack[slow_i].data*plot_scale_fac / (stack[slow_i].data.max()
			- stack[slow_i].data.min()) + dist_offset, color = 'black')
			else:
				plt.plot(ttt, stack[slow_i].data*plot_scale_fac / (global_max
			- stack[slow_i].data.min()) + dist_offset, color = 'black')
		plt.ylim(slowR_lo,slowR_hi)
		plt.xlim(start_buff,end_buff)
	plt.xlabel('Time (s)')
	plt.ylabel('Slowness (s/km)')
	plt.title(date_label)
	os.chdir('/Users/vidale/Documents/PyCode/LASA/Quake_results/Plots')
	plt.savefig(date_label + '_' + str(start_buff) + '_' + str(end_buff) + '_1D.png')
	plt.show()

#%% Save processed files
	print('Stack has ' + str(len(stack)) + ' traces')
#
#	if ARRAY == 0:
#		goto = '/Users/vidale/Documents/PyCode/Hinet'
#	if ARRAY == 1:
#		goto = '/Users/vidale/Documents/PyCode/LASA/Pro_Files'
#	os.chdir(goto)
#	fname = 'HD' + date_label + '_1dstack.mseed'
#	stack.write(fname,format = 'MSEED')

	elapsed_time_wc = time.time() - start_time_wc
	print('This job took ' + str(elapsed_time_wc) + ' seconds')
	os.system('say "Done"')
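
# Usage sketch (added; not part of the original example): hypothetical event file
# name. With ARRAY = 1 the function reads 'EvLocs/<eq_file>' and the LASA station
# list from the hard-coded paths above, then plots and saves a 1D slowness stack.
if __name__ == '__main__':
	pro5stack('event1.txt', slowR_lo=-0.05, slowR_hi=0.05, slow_delta=0.001,
	          start_buff=-20, end_buff=100, ARRAY=1)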
Example #23
def pro4statics(eq_file,
                use_ref_trace=0,
                ref_trace='nothing',
                event_no=0,
                dphase='PcP',
                dphase2='PKiKP',
                dphase3='P',
                dphase4='PP',
                start_beam=-1,
                end_beam=3,
                plot_scale_fac=0.05,
                start_buff=-10,
                end_buff=30,
                qual_threshold=0,
                corr_threshold=0,
                max_time_shift=2,
                min_dist=17,
                max_dist=21,
                ARRAY=0,
                auto_dist=1):

    from obspy import UTCDateTime
    from obspy.signal.cross_correlation import xcorr_pick_correction
    from obspy import Stream
    from obspy import Trace
    from obspy import read
    from obspy.geodetics import gps2dist_azimuth
    import numpy as np
    import os
    import sys
    from obspy.taup import TauPyModel
    import matplotlib.pyplot as plt
    model = TauPyModel(model='iasp91')

    import warnings  # don't show any warnings
    if not sys.warnoptions:
        warnings.simplefilter("ignore")

    print('pro4_get_shifts is starting')

    #%% Get station location file
    if ARRAY == 0:  # Hinet set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_hinet.txt'
    elif ARRAY == 1:  # LASA set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_LASA.txt'
    elif ARRAY == 2:  # China set
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_ch.txt'
    with open(sta_file, 'r') as file:
        lines = file.readlines()
    print('    ' + str(len(lines)) + ' stations read from ' + sta_file)
    # Load station coords into arrays
    # old line: station_index = range(343)
    station_index = range(len(lines))
    st_lats = []
    st_lons = []
    st_deps = []
    st_names = []
    for ii in station_index:
        line = lines[ii]
        split_line = line.split()
        st_names.append(split_line[0])
        st_lats.append(split_line[1])
        st_lons.append(split_line[2])
        st_deps.append(split_line[3])

    if ARRAY == 0:  # stupid kludge to reduce Hi-net names by one letter and equalize capitalization
        for ii in station_index:
            tested_name = st_names[ii]
            this_name_truc = tested_name[0:5]
            name_truc_cap = this_name_truc.upper()
            st_names[ii] = name_truc_cap

    # initialize lists of statics
    sta_names = []
    sta_dists = []
    sta_lats = []
    sta_lons = []
    sta_statics = []
    sta_corrs = []

    #%% Parameter list
    #dphase  = 'PKIKP'       # phase to be aligned
    #dphase2 = 'PKiKP'      # another phase to have traveltime plotted
    #dphase3 = 'PKP'        # another phase to have traveltime plotted
    #dphase4 = 'pP'        # another phase to have traveltime plotted
    #ref_trace = 'N.SZW'   # trace with reference waveform
    #start_beam = 2       # start of correlation window (more positive is earlier)
    start_beam = -start_beam
    #end_beam   = 7       # plots end Xs before PKiKP
    #max_time_shift = 2       # searches up to this time shift for alignment
    #corr_threshold = 0.  # threshold that correlation is good enough to keep trace
    #max_dist = 151
    #min_dist = 150.6
    #plot_scale_fac = 0.2    #  Bigger numbers make each trace amplitude bigger on plot
    #qual_threshold =  0 # minimum SNR
    plot_tt = True  # plot the traveltimes?
    plot_flag = False  # plot for each trace?  Watch out, can be lots, one for each station pair!!
    min_dist_auto = 180  # for use in auto-scaling y axis in trace gathers
    max_dist_auto = 0

    #%% Get saved event info, also used to name files
    #  event 2016-05-28T09:47:00.000 -56.241 -26.935 78
    file = open('/Users/vidale/Documents/PyCode/EvLocs/' + eq_file, 'r')
    lines = file.readlines()
    split_line = lines[0].split()
    #            ids.append(split_line[0])  ignore label, now "event"
    t = UTCDateTime(split_line[1])
    date_label = split_line[1][0:10]
    ev_lat = float(split_line[2])
    ev_lon = float(split_line[3])
    ev_depth = float(split_line[4])

    print('        Date label ' + date_label + ' lat ' + str(ev_lat) +
          ' lon ' + str(ev_lon))

    st = Stream()
    #    fname     = 'HD' + date_label + '.mseed'
    fname = 'HD' + date_label + 'sel.mseed'  # sel file has windowing, shift?, filtering

    print('        File ' + fname)

    os.chdir('/Users/vidale/Documents/PyCode/Pro_Files/')
    os.system('pwd')
    st = read(fname)
    print('    ' + str(len(st)) + '  traces read in')
    print('         First trace has : ' + str(len(st[0].data)) + ' time pts ')

    #%% Reference trace
    trim_start = t + start_buff
    trim_end = t + end_buff
    time_buff = end_buff - start_buff
    tr_ref = Trace()
    #%% Stack reference trace
    if use_ref_trace == 0:
        counter = 0
        for tr in st:  # loop over seismograms to find reference trace, put it in tr_ref
            if counter == 0:  # copy first trace to stack
                tr_ref = tr.copy()
                tr_ref.stats.station = 'STACK'
                tr_ref.trim(starttime=trim_start - time_buff,
                            endtime=trim_end + time_buff)
                nt_ref = len(tr_ref.data)
                tr_ref.normalize()
                counter = counter + 1
            else:  # add the rest of the traces to stack
                tr_add = tr.copy()
                tr_add.trim(starttime=trim_start - time_buff,
                            endtime=trim_end + time_buff)
                nt_add = len(tr_ref.data)
                tr_add.normalize()

                for it in range(nt_ref):  # add seismogram one point at a time
                    if nt_ref != nt_add:  # are seismograms the same length?
                        print(
                            'trying to stack seismograms of different lengths, debug!'
                        )
                    tr_ref.data[it] += tr_add[it]
                counter = counter + 1
        tr_ref.data = tr_ref.data / counter

    #%% Pick reference trace
    if use_ref_trace == 1:
        for tr in st:  # loop over seismograms to find reference trace, put it in tr_ref
            if (tr.stats.station == ref_trace):  # found it
                tr_ref = tr.copy()
                tr_ref.trim(starttime=trim_start - time_buff,
                            endtime=trim_end + time_buff)
                nt_ref = len(tr_ref.data)
                tr_ref.normalize()
                print('        found reference station ' + tr.stats.station)
    if len(tr_ref.data) == 0:
        sys.exit('Reference trace empty, will not work!')

    #%% Plot reference trace
    plt.close(4)
    plt.figure(4, figsize=(10, 10))
    plt.xlim(start_buff, end_buff)
    plt.ylim(min(tr_ref.data), max(tr_ref.data))

    time = np.arange(nt_ref) * tr_ref.stats.delta + start_buff
    plt.plot(time, tr_ref.data, color='black')
    plt.xlabel('Time (s)')
    if use_ref_trace == 1:
        plt.title('Reference trace ' + dphase + ' for ' + fname[2:12] + '  ' +
                  ref_trace)
        plt.ylabel('Normed amp')
    else:
        plt.title('Summed reference trace ' + dphase + ' for ' + fname[2:12] +
                  '   ' + str(event_no))
        plt.ylabel('Average amp, each trace normed to 1')
    plt.show()

    stgood = Stream()
    st2 = st.copy(
    )  # hard to measure timing of traces without adjusting entire thing
    # print('st2 has: ' + str(len(st)) + ' traces' + ' t (origin time) ' + str(t))
    print('        Ref time ' + str(t) +
          ' start_beam end_beam max_time_shift ' + str(start_beam) + '  ' +
          str(end_beam) + '  ' + str(max_time_shift) + '  ')

    #  get station lat-lon, compute distance for plot
    good_corr = 0
    bad_corr = 0
    for tr in st:  # do all seismograms
        if tr.stats.station in st_names:  # find station in inventory
            ii = st_names.index(tr.stats.station)
            #  print('found Station ' + this_name + '  ' + actual_trace)
            stalon = float(st_lons[ii])  # look up lat & lon to find distance
            stalat = float(st_lats[ii])
            distance = gps2dist_azimuth(stalat, stalon, ev_lat, ev_lon)
            tr.stats.distance = distance[0] / (
                1000. * 111)  # distance for phase time and plotting

            if tr.stats.distance < min_dist_auto:  # for auto-scaling y-axis in trace gather plots
                min_dist_auto = tr.stats.distance
            if tr.stats.distance > max_dist_auto:
                max_dist_auto = tr.stats.distance

            arrivals = model.get_travel_times(
                source_depth_in_km=ev_depth,
                distance_in_degree=tr.stats.distance,
                phase_list=[dphase])
            #                 print(tr.stats.station + '  ' + tr_ref.stats.station + ' start_corr ' +
            #                    str(start_beam) + ' end ' + str(end_beam))
            try:
                dt, coeff = xcorr_pick_correction(t,
                                                  tr_ref,
                                                  t,
                                                  tr,
                                                  start_beam,
                                                  end_beam,
                                                  max_time_shift,
                                                  plot=plot_flag)
                if dt > max_time_shift:
                    print('Hey!  Excess shift: %.3f' % dt)
                    print('Station ' + tr.stats.station + ' corr is ' +
                          str(coeff))
                if coeff > 1:
                    print('Hey!  Excess coeff: %.3f' % coeff)
                    print('Station ' + tr.stats.station + ' corr is ' +
                          str(coeff))
                if coeff > corr_threshold:
                    good_corr += 1
                    if plot_flag == True:
                        print('Time correction for pick 2: %.6f' % dt)
                        print('Correlation coefficient: %.2f' % coeff)
                    tr.stats.starttime -= dt
                    sta_names.extend([tr.stats.station])
                    sta_dists.extend([tr.stats.distance])
                    sta_lats.extend([stalat])
                    sta_lons.extend([stalon])
                    sta_statics.extend([dt])
                    sta_corrs.extend([coeff])
                    stgood += tr
                else:
                    bad_corr += 1
            except:
                print('        No time shift for ' + tr.stats.station +
                      ' at distance ' + str(tr.stats.distance))

    ##        # store shift to write out
    ##            if coeff > corr_threshold:
    #            # write out station_name, dt, coeff
    #            # record shifted waveform in stgood
    print('    ' + str(good_corr) + ' traces with good correlation')
    if (good_corr == 0):
        sys.exit('No traces is a failure')
    print('    ' + str(bad_corr) + '  traces with bad correlation')
    print('    ' + str(good_corr + bad_corr) + ' out of total')
    print('        corr threshold is ' + str(corr_threshold))

    plt.close(5)
    plt.figure(5, figsize=(10, 10))
    plt.xlim(start_buff, end_buff)
    plt.ylim(min_dist, max_dist)

    if auto_dist == 1:
        dist_diff = max_dist_auto - min_dist_auto  # add space at extremes
        plt.ylim(min_dist_auto - 0.1 * dist_diff,
                 max_dist_auto + 0.1 * dist_diff)
    else:
        plt.ylim(min_dist, max_dist)

    for tr in stgood:
        dist_offset = tr.stats.distance  # trying for approx degrees
        time = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime -
                                                           t)
        plt.plot(time, (tr.data - np.median(tr.data)) * plot_scale_fac /
                 (tr.data.max() - tr.data.min()) + dist_offset,
                 color='black')

    #%% Plot before shift
    if plot_tt:
        # first traveltime curve
        line_pts = 50
        dist_vec = np.arange(min_dist, max_dist,
                             (max_dist - min_dist) / line_pts)  # distance grid
        time_vec1 = np.arange(
            min_dist, max_dist, (max_dist - min_dist) /
            line_pts)  # empty time grid of same length (filled with -1000)
        for i in range(0, line_pts):
            arrivals = model.get_travel_times(source_depth_in_km=ev_depth,
                                              distance_in_degree=dist_vec[i],
                                              phase_list=[dphase])
            num_arrivals = len(arrivals)
            found_it = 0
            for j in range(0, num_arrivals):
                if arrivals[j].name == dphase:
                    time_vec1[i] = arrivals[j].time
                    found_it = 1
            if found_it == 0:
                time_vec1[i] = np.nan
    # second traveltime curve
        if dphase2 != 'no':
            time_vec2 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # empty time grid of same length (filled with -1000)
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase2])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase2:
                        time_vec2[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec2[i] = np.nan
            plt.plot(time_vec2, dist_vec, color='orange')
        # third traveltime curve
        if dphase3 != 'no':
            time_vec3 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # empty time grid of same length (filled with -1000)
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase3])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase3:
                        time_vec3[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec3[i] = np.nan
            plt.plot(time_vec3, dist_vec, color='yellow')
        # fourth traveltime curve
        if dphase4 != 'no':
            time_vec4 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # empty time grid of same length (filled with -1000)
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase4])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase4:
                        time_vec4[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec4[i] = np.nan
            plt.plot(time_vec4, dist_vec, color='purple')

        plt.plot(time_vec1, dist_vec, color='blue')
        plt.show()

    plt.xlabel('Time (s)')
    plt.ylabel('Epicentral distance from event (°)')
    plt.title('Post-alignment ' + dphase + ' for ' + fname[2:12] + '   ' +
              str(event_no))
    plt.show()

    # plot traces
    plt.close(6)
    plt.figure(6, figsize=(10, 10))
    plt.xlim(start_buff, end_buff)
    plt.ylim(min_dist, max_dist)

    if auto_dist == 1:
        dist_diff = max_dist_auto - min_dist_auto  # add space at extremes
        plt.ylim(min_dist_auto - 0.1 * dist_diff,
                 max_dist_auto + 0.1 * dist_diff)
        max_dist = max_dist_auto
        min_dist = min_dist_auto
    else:
        plt.ylim(min_dist, max_dist)

    for tr in st2:  # regenerate distances into st2 as they were loaded into st for plots
        if tr.stats.station in st_names:  # find station in station list
            ii = st_names.index(tr.stats.station)
            stalon = float(st_lons[ii])  # look up lat & lon to find distance
            stalat = float(st_lats[ii])
            distance = gps2dist_azimuth(stalat, stalon, ev_lat, ev_lon)
            tr.stats.distance = distance[0] / (
                1000. * 111)  # distance for phase time and plotting

    for tr in st2:  # generate plot
        dist_offset = tr.stats.distance  # trying for approx degrees
        time = np.arange(len(tr.data)) * tr.stats.delta + (tr.stats.starttime -
                                                           t)
        plt.plot(time, (tr.data - np.median(tr.data)) * plot_scale_fac /
                 (tr.data.max() - tr.data.min()) + dist_offset,
                 color='black')

    #%% Plot after shift
    if plot_tt:
        # first traveltime curve
        line_pts = 50
        dist_vec = np.arange(min_dist, max_dist,
                             (max_dist - min_dist) / line_pts)  # distance grid
        time_vec1 = np.arange(
            min_dist, max_dist, (max_dist - min_dist) /
            line_pts)  # empty time grid of same length (filled with -1000)
        for i in range(0, line_pts):
            arrivals = model.get_travel_times(source_depth_in_km=ev_depth,
                                              distance_in_degree=dist_vec[i],
                                              phase_list=[dphase])
            num_arrivals = len(arrivals)
            found_it = 0
            for j in range(0, num_arrivals):
                if arrivals[j].name == dphase:
                    time_vec1[i] = arrivals[j].time
                    found_it = 1
            if found_it == 0:
                time_vec1[i] = np.nan
    # second traveltime curve
        if dphase2 != 'no':
            time_vec2 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # empty time grid of same length (filled with -1000)
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase2])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase2:
                        time_vec2[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec2[i] = np.nan
            plt.plot(time_vec2, dist_vec, color='orange')
        # third traveltime curve
        if dphase3 != 'no':
            time_vec3 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # empty time grid of same length (filled with -1000)
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase3])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase3:
                        time_vec3[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec3[i] = np.nan
            plt.plot(time_vec3, dist_vec, color='yellow')
        # fourth traveltime curve
        if dphase4 != 'no':
            time_vec4 = np.arange(
                min_dist, max_dist, (max_dist - min_dist) /
                line_pts)  # empty time grid of same length (filled with -1000)
            for i in range(0, line_pts):
                arrivals = model.get_travel_times(
                    source_depth_in_km=ev_depth,
                    distance_in_degree=dist_vec[i],
                    phase_list=[dphase4])
                num_arrivals = len(arrivals)
                found_it = 0
                for j in range(0, num_arrivals):
                    if arrivals[j].name == dphase4:
                        time_vec4[i] = arrivals[j].time
                        found_it = 1
                if found_it == 0:
                    time_vec4[i] = np.nan
            plt.plot(time_vec4, dist_vec, color='purple')

        plt.plot(time_vec1, dist_vec, color='blue')
        plt.show()

    plt.xlabel('Time (s)')
    plt.ylabel('Epicentral distance from event (°)')
    plt.title('Pre-alignment ' + dphase + ' for ' + fname[2:12] + '   ' +
              str(event_no))
    plt.show()

    #  Save stats
    fname_stats = '/Users/vidale/Documents/PyCode/Mseed/fine_statics.txt'

    #  Save station static correction files
    #fname_stats = 'Statics' + etime[:10] + dphase + ref_trace + '.txt'
    stats_file = open(fname_stats, 'w')
    len_file1 = len(sta_names)
    for j in range(0, len_file1):
        dist_str = '{:.2f}'.format(
            sta_dists[j])  # 2 digits after decimal place
        lat_str = '{:.4f}'.format(sta_lats[j])  # 4 digits after decimal place
        lon_str = '{:.4f}'.format(sta_lons[j])
        stat_str = '{:.3f}'.format(sta_statics[j])
        corr_str = '{:.3f}'.format(sta_corrs[j])
        write_line = sta_names[
            j] + ' ' + dist_str + ' ' + lat_str + ' ' + lon_str + ' ' + stat_str + ' ' + corr_str + '\n'
        stats_file.write(write_line)
    stats_file.close()
    # print('    ' + str(len_file1) + '  traces are in correlation file')


#     os.system('say "Done"')
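
# Usage sketch (added; not part of the original example): hypothetical event file
# name. Aligns PcP across the LASA array (ARRAY = 1) and writes per-station
# statics to the fine_statics.txt path hard-coded above.
if __name__ == '__main__':
    pro4statics('event1.txt', use_ref_trace=0, dphase='PcP', dphase2='PKiKP',
                start_beam=-1, end_beam=3, max_time_shift=2,
                corr_threshold=0.5, ARRAY=1)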
Example #24
def preprocess(filelst, freqmin=0.05, freqmax=5.0, evdp_unit="m", sample_rate=40):

    # try:
    #     os.makedirs("data")

    model = TauPyModel(model="ak135")

    if evdp_unit=="m":
        evdp = 1000.0
    elif evdp_unit=="km":
        evdp = 1.0
    else:
        evdp = 1.0

    filelst.sort()


    for file in filelst:

        print "Pre-processing:", file

        st = read(file)
        tr = st[0]

        # pre-processing data
        tr1 = tr.copy()
        tr1.detrend(type="linear")
        tr1.detrend(type="demean")
        tr1.taper(type="cosine", max_percentage=0.05, side="both")
        tr1.filter(type="bandpass", freqmin=freqmin, freqmax=freqmax)

        if tr.stats.sampling_rate!=sample_rate:
            tr1.interpolate(sampling_rate=sample_rate)

        # calculating the traveltime of P, S
        arrivals = model.get_travel_times(source_depth_in_km=tr1.stats.sac.evdp/evdp,
                                          distance_in_degree=tr1.stats.sac.gcarc,
                                          phase_list=["P", "S"])

        for arr in arrivals:
            if arr.name == "P":
                tr1.stats.sac.t1 = arr.time + tr1.stats.sac.o
                tr1.stats.sac.kt1 = "P"
                tr1.stats.sac.user1 = arr.ray_param*math.pi/180.0/111.195
                tr1.stats.sac.kuser1 = "P"
            elif arr.name =="S":
                tr1.stats.sac.t2 = arr.time + tr1.stats.sac.o
                tr1.stats.sac.kt2 = "S"
                tr1.stats.sac.user2 = arr.ray_param*math.pi/180.0/111.195
                tr1.stats.sac.kuser2 = "S"

        # event origin time
        evt_origin = tr1.stats.starttime + tr1.stats.sac.o - tr1.stats.sac.b
        o = str(evt_origin)
        for s in ["-", "T", ":"]:
            o = o.replace(s, "_")

        fn = "Event_"+o[:19]+"."+tr1.stats.channel+".sac"

        # print o

        # save sac files
        folder = "SDI/data/" + ".".join([tr1.stats.network, tr1.stats.station])

        if tr1.stats.location!="":
            folder = folder+"."+tr1.stats.location

        # print folder


        try:
            os.makedirs(folder)
        except:
            pass

        # print folder

        # print tr1.stats.channel
        fn = folder+"/"+fn
        tr1.write(fn, format="SAC")

        # return

    pass
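
# Usage sketch (added; not part of the original example): hypothetical list of raw
# SAC files; writes filtered copies with P/S arrival picks under SDI/data/NET.STA/.
if __name__ == '__main__':
    import glob
    preprocess(sorted(glob.glob('RAW/*.SAC')), freqmin=0.05, freqmax=5.0,
               evdp_unit='km', sample_rate=40)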
Example #25
import h5py
import numpy as np
import copy

from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist.grid_finder import (FixedLocator, MaxNLocator,
                                                  DictFormatter)
from seispy.data import phase_window
from matplotlib import pyplot as plt
import obspy.signal.filter
import obspy.signal
from obspy.taup import TauPyModel
model = TauPyModel(model="prem50")
from matplotlib import colors, ticker, cm
from matplotlib.patches import Polygon
from matplotlib.colors import LogNorm
from matplotlib.colors import ListedColormap
import os
from scipy.optimize import curve_fit
import math
from matplotlib.colors import LightSource
from mpl_toolkits.basemap import Basemap
from cycler import cycler
import multiprocessing
import shutil
from seispy import mapplot
from seispy import data
from seispy import convert
Example #26
def preprocess(filelst, freqmin=0.05, freqmax=5.0, evdp_unit="m", sample_rate=40):
    """
    Perform preprocessing of data, including dedtrend, taper, filter and calculate
    theoretical travel time for P and S.

    The theoretical P and S traveltime would be saved in header t1 and t2.

    The processed SAC file would write into the SDI/data/NET.STA/

    :param filelst:
    :param freqmin: min fre, default 0.05
    :param freqmax: max fre, default 5.0
    :param evdp_unit: "km" or "m", default km
    :param sample_rate: interpolate the traces in the the given sampling rate, default 40
    :return: none
    """


    # try:
    #     os.makedirs("data")

    model = TauPyModel(model="ak135")

    if evdp_unit=="m":
        evdp = 1000.0
    elif evdp_unit=="km":
        evdp = 1.0
    else:
        evdp = 1.0

    filelst.sort()


    for file in filelst:

        print "Pre-processing:", file

        try:
            st = read(file)
            tr = st[0]
        except:
            continue

        # pre-processing data
        tr1 = tr.copy()
        tr1.detrend(type="linear")
        tr1.detrend(type="demean")
        tr1.taper(type="cosine", max_percentage=0.05, side="both")
        tr1.filter(type="bandpass", freqmin=freqmin, freqmax=freqmax, zerophase=True)

        if tr.stats.sampling_rate!=sample_rate:
            # tr1.interpolate(sampling_rate=sample_rate)
            tr1.resample(sampling_rate=sample_rate)

        # calculating the traveltime of P, S
        arrivals = model.get_travel_times(source_depth_in_km=tr1.stats.sac.evdp/evdp,
                                          distance_in_degree=tr1.stats.sac.gcarc,
                                          phase_list=["P", "S"])

        for arr in arrivals:
            if arr.name == "P":
                tr1.stats.sac.t1 = arr.time + tr1.stats.sac.o
                tr1.stats.sac.kt1 = "P"
                tr1.stats.sac.user1 = arr.ray_param*math.pi/180.0/111.195
                tr1.stats.sac.kuser1 = "P"
            elif arr.name =="S":
                tr1.stats.sac.t2 = arr.time + tr1.stats.sac.o
                tr1.stats.sac.kt2 = "S"
                tr1.stats.sac.user2 = arr.ray_param*math.pi/180.0/111.195
                tr1.stats.sac.kuser2 = "S"

        # event origin time
        evt_origin = tr1.stats.starttime + tr1.stats.sac.o - tr1.stats.sac.b
        o = str(evt_origin)
        for s in ["-", "T", ":"]:
            o = o.replace(s, "_")

        # fn = "Event_"+o[:19]+"."+tr1.stats.channel+".sac"

        temp = evt_origin.datetime
        fn = ".".join([temp.strftime("%Y.%j.%H.%M.%S"), "0000", tr1.id, "M", "SAC"])

        # print o

        # save sac files
        folder = "SDI/data/" + ".".join([tr1.stats.network, tr1.stats.station])

        if tr1.stats.location!="":
            folder = folder+"."+tr1.stats.location

        # print folder


        try:
            os.makedirs(folder)
        except:
            pass

        # print folder

        # print tr1.stats.channel
        fn = folder+"/"+fn
        tr1.write(fn, format="SAC")

        # return

    pass
Example #27
def stochastic_simulation(home,
                          project_name,
                          rupture_name,
                          sta,
                          sta_lon,
                          sta_lat,
                          component,
                          model_name,
                          rise_time_depths,
                          moho_depth_in_km,
                          total_duration=100,
                          hf_dt=0.01,
                          stress_parameter=50,
                          kappa=0.04,
                          Qexp=0.6,
                          Pwave=False,
                          high_stress_depth=1e4):
    '''
    Run stochastic HF sims
    
    stress parameter is in bars
    '''

    from numpy import genfromtxt, pi, logspace, log10, mean, where, exp, arange, zeros, argmin, rad2deg, arctan2, real
    from pyproj import Geod
    from obspy.geodetics import kilometer2degrees
    from obspy.taup import TauPyModel
    from mudpy.forward import get_mu, write_fakequakes_hf_waveforms_one_by_one, read_fakequakes_hypo_time
    from obspy import Stream, Trace
    from sys import stdout
    import warnings

    #print out what's going on:
    out = '''Running with input parameters:
    home = %s
    project_name = %s
    rupture_name = %s
    sta = %s
    sta_lon = %s
    sta_lat = %s
    model_name = %s
    rise_time_depths = %s
    moho_depth_in_km = %s
    total_duration = %s
    hf_dt = %s
    stress_parameter = %s
    kappa = %s
    Qexp = %s
    component = %s
    Pwave = %s
    high_stress_depth = %s
    ''' % (home, project_name, rupture_name, sta, str(sta_lon), str(sta_lat),
           model_name, str(rise_time_depths), str(moho_depth_in_km),
           str(total_duration), str(hf_dt), str(stress_parameter), str(kappa),
           str(Qexp), str(component), str(Pwave), str(high_stress_depth))
    print(out)

    #    rupture=rupture_name.split('.')[0]+'.'+rupture_name.split('.')[1]
    #    log=home+project_name+'/output/waveforms/'+rupture+'/'+sta+'.HN'+component+'.1cpu.log'
    #    logfile=open(log,'w')
    #    logfile.write(out)
    #print 'stress is '+str(stress_parameter)

    #I don't condone it but this cleans up the warnings
    warnings.filterwarnings("ignore")

    #Load the source
    fault = genfromtxt(home + project_name + '/output/ruptures/' +
                       rupture_name)

    #Onset times for each subfault
    onset_times = fault[:, 12]

    #load velocity structure
    structure = genfromtxt(home + project_name + '/structure/' + model_name)

    #Frequencies vector
    f = logspace(log10(hf_dt), log10(1 / (2 * hf_dt)) + 0.01, 50)
    omega = 2 * pi * f

    #Output time vector (0 is origin time)
    t = arange(0, total_duration, hf_dt)

    #Projection object for distance calculations
    g = Geod(ellps='WGS84')

    #Create taup velocity model object, paste on top of iasp91
    #taup_create.build_taup_model(home+project_name+'/structure/bbp_norcal.tvel',output_folder=home+project_name+'/structure/')
    velmod = TauPyModel(model=home + project_name + '/structure/iquique',
                        verbose=True)
    #Get epicentral time
    epicenter, time_epi = read_fakequakes_hypo_time(home, project_name,
                                                    rupture_name)

    #Moments
    slip = (fault[:, 8]**2 + fault[:, 9]**2)**0.5
    subfault_M0 = slip * fault[:, 10] * fault[:, 11] * fault[:, 13]
    subfault_M0 = subfault_M0 * 1e7  #to dyne-cm
    M0 = subfault_M0.sum()
    relative_subfault_M0 = subfault_M0 / M0
    Mw = (2. / 3) * (log10(M0 * 1e-7) - 9.1)

    #Corner frequency scaling
    i = where(slip > 0)[0]  #Non-zero faults
    N = len(i)  #number of subfaults
    dl = mean((fault[:, 10] + fault[:, 11]) / 2)  #predominant length scale
    dl = dl / 1000  # to km

    #Tau=p perturbation
    tau_perturb = 0.1

    #Deep faults receive a higher stress
    stress_multiplier = 3

    print('... working on ' + component +
          ' component semistochastic waveform for station ' + sta)

    #initialize output seismogram
    tr = Trace()
    tr.stats.station = sta
    tr.stats.delta = hf_dt
    tr.stats.starttime = time_epi
    #info for sac header (added at the end)
    az, backaz, dist_m = g.inv(epicenter[0], epicenter[1], sta_lon, sta_lat)
    dist_in_km = dist_m / 1000.

    hf = zeros(len(t))

    #    out='''Parameters before we get into subfault calculations:
    #    rupture_name = %s
    #    epicenter = %s
    #    time_epi = %s
    #    M0 = %E
    #    Mw = %10.4f
    #    Num_Subfaults = %i
    #    dl = %.2f
    #    Dist_in_km = %10.4f
    #    '''%(rupture_name,str(epicenter),str(time_epi),M0,Mw,int(N),dl,dist_in_km)
    #    print out
    #    logfile.write(out)

    #Loop over subfaults
    #    earliestP=1e10  #something outrageously high
    #    earliestP_kfault=1e10
    for kfault in range(len(fault)):

        #Print status to screen
        if kfault % 150 == 0:
            if kfault == 0:
                stdout.write('      [')
                stdout.flush()
            stdout.write('.')
            stdout.flush()
        if kfault == len(fault) - 1:
            stdout.write(']\n')
            stdout.flush()

        #Include only subfaults with non-zero slip
        if subfault_M0[kfault] > 0:

            #Get subfault to station distance
            lon_source = fault[kfault, 1]
            lat_source = fault[kfault, 2]
            azimuth, baz, dist = g.inv(lon_source, lat_source, sta_lon,
                                       sta_lat)
            dist_in_degs = kilometer2degrees(dist / 1000.)

            #Source depth?
            z_source = fault[kfault, 3]

            #No change
            stress = stress_parameter

            #Is subfault in an SMGA?
            #radius_in_km=15.0
            #smga_center_lon=-69.709200
            #smga_center_lat=-19.683600
            #in_smga=is_subfault_in_smga(lon_source,lat_source,smga_center_lon,smga_center_lat,radius_in_km)
            #
            ###Apply multiplier?
            #if in_smga==True:
            #    stress=stress_parameter*stress_multiplier
            #    print "%.4f,%.4f is in SMGA, stress is %d" % (lon_source,lat_source,stress)
            #else:
            #    stress=stress_parameter

            #Apply multiplier?
            #if slip[kfault]>7.5:
            #    stress=stress_parameter*stress_multiplier
            ##elif lon_source>-72.057 and lon_source<-71.2 and lat_source>-30.28:
            ##    stress=stress_parameter*stress_multiplier
            #else:
            #    stress=stress_parameter

            #Apply multiplier?
            #if z_source>high_stress_depth:
            #    stress=stress_parameter*stress_multiplier
            #else:
            #    stress=stress_parameter

            # Frankel 95 scaling of corner frequency #verified this looks the same in GP
            # Right now this applies the same factor to all faults
            fc_scale = (M0) / (N * stress * dl**3 * 1e21)  #Frankel scaling
            small_event_M0 = stress * dl**3 * 1e21

            #Get rho, alpha, beta at subfault depth
            zs = fault[kfault, 3]
            mu, alpha, beta = get_mu(structure, zs, return_speeds=True)
            rho = mu / beta**2

            #Get radiation scale factor
            Spartition = 1 / 2**0.5
            if component == 'N':
                component_angle = 0
            elif component == 'E':
                component_angle = 90

            rho = rho / 1000  #to g/cm**3
            beta = (beta / 1000) * 1e5  #to cm/s
            alpha = (alpha / 1000) * 1e5

            #Verified this produces same value as in GP
            CS = (2 * Spartition) / (4 * pi * (rho) * (beta**3))
            CP = 2 / (4 * pi * (rho) * (alpha**3))

            #Get local subfault rupture speed
            beta = beta / 100  #to m/s
            vr = get_local_rupture_speed(zs, beta, rise_time_depths)
            vr = vr / 1000  #to km/s
            dip_factor = get_dip_factor(fault[kfault, 5], fault[kfault, 8],
                                        fault[kfault, 9])

            #Subfault corner frequency
            c0 = 2.0  #GP2015 value
            fc_subfault = (c0 * vr) / (dip_factor * pi * dl)
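            #(illustrative numbers only: vr = 2.8 km/s, dl = 10 km and dip_factor ~ 0.82
            # give fc_subfault = 2*2.8/(0.82*pi*10) ~ 0.22 Hz)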

            #get subfault source spectrum
            #S=((relative_subfault_M0[kfault]*M0/N)*f**2)/(1+fc_scale*(f/fc_subfault)**2)
            S = small_event_M0 * (omega**2 / (1 + (f / fc_subfault)**2))
            frankel_conv_operator = fc_scale * (
                (fc_subfault**2 + f**2) / (fc_subfault**2 + fc_scale * f**2))
            S = S * frankel_conv_operator

            #get high frequency decay
            P = exp(-pi * kappa * f)
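            #(illustrative numbers only: kappa = 0.04 s gives P = exp(-pi*0.04*10) ~ 0.28
            # at f = 10 Hz, i.e. roughly a factor-of-3.5 reduction)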

            #get quarter wavelength amplification factors
            # pass rho in kg/m^3 (this units nightmare is what I get for following Graves' code)
            I = get_amplification_factors(f, structure, zs, beta, rho * 1000)

            #            if kfault==0:
            #                out='''Parameters within subfault calculations:
            #                kfault_lon = %10.4f
            #                kfault_lat = %10.4f
            #                CS = %s
            #                CP = %s
            #                S[0] = %s
            #                frankel_conv_operator[0] = %s
            #                '''%(fault[kfault,1],fault[kfault,2],str(CS),str(CP),str(S[0]),str(frankel_conv_operator[0]))
            #                print out
            #                logfile.write(out)

            #Get other geometric parameters necessary for radiation pattern
            strike = fault[kfault, 4]
            dip = fault[kfault, 5]
            ss = fault[kfault, 8]
            ds = fault[kfault, 9]
            rake = rad2deg(arctan2(ds, ss))

            #Get ray paths for all direct P arrivals
            Ppaths = velmod.get_ray_paths(zs,
                                          dist_in_degs,
                                          phase_list=['P', 'p'])

            #Get ray paths for all direct S arrivals
            try:
                Spaths = velmod.get_ray_paths(zs,
                                              dist_in_degs,
                                              phase_list=['S', 's'])
            except:
                Spaths = velmod.get_ray_paths(zs + tau_perturb,
                                              dist_in_degs,
                                              phase_list=['S', 's'])

            #sometimes there's no S, weird I know. Retry with a sequence of source-depth
            #perturbations until an arrival is found.
            for n_perturb in [1, 5, -5, 5, -10, 10, -50, 50, -75, 75]:
                if len(Spaths) > 0:
                    break
                Spaths = velmod.get_ray_paths(zs + n_perturb * tau_perturb,
                                              dist_in_degs,
                                              phase_list=['S', 's'])
            if len(Spaths) == 0:
                print(
                    'ERROR: I give up, no direct S in spite of multiple attempts at subfault '
                    + str(kfault))

            #Get direct s path and moho reflection
            mohoS = None
            directS = Spaths[0]
            directP = Ppaths[0]
            #print len(Spaths)
            if len(Spaths) == 1:  #only direct S
                pass
            else:
                #turn_depth=zeros(len(Spaths)-1) #turning depth of other non-direct rays
                #for k in range(1,len(Spaths)):
                #    turn_depth[k-1]=Spaths[k].path['depth'].max()
                ##If there's a ray that turns within 2km of Moho, call that guy the Moho reflection
                #deltaz=abs(turn_depth-moho_depth_in_km)
                #i=argmin(deltaz)
                #if deltaz[i]<2: #Yes, this is a moho reflection
                #    mohoS=Spaths[i+1]
                #else:
                #    mohoS=None
                mohoS = Spaths[-1]

            #######         Build Direct P ray           ######
            if Pwave == True:
                take_off_angle_P = directP.takeoff_angle

                #Get attenuation due to geometrical spreading (from the path length)
                path_length_P = get_path_length(directP, zs, dist_in_degs)
                path_length_P = path_length_P * 100  #to cm

                #Get effect of intrinsic attenuation for that ray (path integrated)
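                #(note: the ray path passed here is directS even though Qtype='P')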
                Q_P = get_attenuation(f, structure, directS, Qexp, Qtype='P')

                #Build the entire path term
                G_P = (I * Q_P) / path_length_P

                #Get conically averaged radiation pattern terms
                RP = conically_avg_P_radiation_pattern(strike, dip, rake,
                                                       azimuth,
                                                       take_off_angle_P)
                RP = abs(RP)

                #Get partition of Pwave into Z and N,E components
                incidence_angle = directP.incident_angle
                Npartition, Epartition, Zpartition = get_P_wave_partition(
                    incidence_angle, azimuth)
                if component == 'Z':
                    Ppartition = Zpartition
                elif component == 'N':
                    Ppartition = Npartition
                else:
                    Ppartition = Epartition

                #And finally multiply everything together to get the subfault amplitude spectrum
                AP = CP * S * G_P * P * RP * Ppartition

                #Generate windowed time series
                duration = 1. / fc_subfault + 0.09 * (dist / 1000)
                w = windowed_gaussian(duration,
                                      hf_dt,
                                      window_type='saragoni_hart')

                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
                hf_seis_P = apply_spectrum(w, AP, f, hf_dt)

                #What time after OT should this time series start at?
                time_insert = directP.path['time'][-1] + onset_times[kfault]
                #                if directP.time+onset_times[kfault] < earliestP:
                #                    earliestP=directP.time+onset_times[kfault]
                #                    earliestP_kfault=kfault
                i = argmin(abs(t - time_insert))
                j = i + len(hf_seis_P)

                #Check seismogram doesn't go past last sample
                if i < len(hf) - 1:  #i (the start of the seismogram) falls within the trace
                    if j > len(hf):  #seismogram goes past total_duration length, trim it
                        len_paste = len(hf) - i
                        j = len(hf)
                        #Add seismogram
                        hf[i:j] = hf[i:j] + real(hf_seis_P[0:len_paste])
                    else:  #Lengths are fine
                        hf[i:j] = hf[i:j] + real(hf_seis_P)
                else:  #Seismogram starts after end of available space
                    pass

            #######         Build Direct S ray           ######
            take_off_angle_S = directS.takeoff_angle

            #Get attenuation due to geometrical spreading (from the path length)
            path_length_S = get_path_length(directS, zs, dist_in_degs)
            path_length_S = path_length_S * 100  #to cm

            #Get effect of intrinsic attenuation for that ray (path integrated)
            Q_S = get_attenuation(f, structure, directS, Qexp)

            #Build the entire path term
            G_S = (I * Q_S) / path_length_S

            #Get conically averaged radiation pattern terms
            if component == 'Z':
                RP_vert = conically_avg_vert_radiation_pattern(
                    strike, dip, rake, azimuth, take_off_angle_S)
                #And finally multiply everything together to get the subfault amplitude spectrum
                AS = CS * S * G_S * P * RP_vert
            else:
                RP = conically_avg_radiation_pattern(strike, dip, rake,
                                                     azimuth, take_off_angle_S,
                                                     component_angle)
                RP = abs(RP)
                #And finally multiply everything together to get the subfault amplitude spectrum
                AS = CS * S * G_S * P * RP

            #Generate windowed time series
            duration = 1. / fc_subfault + 0.063 * (dist / 1000)
            w = windowed_gaussian(duration, hf_dt, window_type='saragoni_hart')
            #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])

            #Go to frequency domain, apply amplitude spectrum and ifft for final time series
            hf_seis_S = apply_spectrum(w, AS, f, hf_dt)

            #What time after OT should this time series start at?
            time_insert = directS.path['time'][-1] + onset_times[kfault]
            #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
            #time_insert=Ppaths[0].path['time'][-1]
            i = argmin(abs(t - time_insert))
            j = i + len(hf_seis_S)

            #Check seismogram doesn't go past last sample
            if i < len(hf) - 1:  #i (the start of the seismogram) falls within the trace
                if j > len(hf):  #seismogram goes past total_duration length, trim it
                    len_paste = len(hf) - i
                    j = len(hf)
                    #Add seismogram
                    hf[i:j] = hf[i:j] + real(hf_seis_S[0:len_paste])
                else:  #Lengths are fine
                    hf[i:j] = hf[i:j] + real(hf_seis_S)
            else:  #Beginning of seismogram is past end of available space
                pass

            #######         Build Moho reflected S ray           ######


#            if mohoS==None:
#                pass
#            else:
#                if kfault%100==0:
#                    print '... ... building Moho reflected S wave'
#                take_off_angle_mS=mohoS.takeoff_angle
#
#                #Get attenuation due to geometrical spreading (from the path length)
#                path_length_mS=get_path_length(mohoS,zs,dist_in_degs)
#                path_length_mS=path_length_mS*100 #to cm
#
#                #Get effect of intrinsic aptimeenuation for that ray (path integrated)
#                Q_mS=get_attenuation(f,structure,mohoS,Qexp)
#
#                #Build the entire path term
#                G_mS=(I*Q_mS)/path_length_mS
#
#                #Get conically averaged radiation pattern terms
#                if component=='Z':
#                    RP_vert=conically_avg_vert_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS)
#                    #And finally multiply everything together to get the subfault amplitude spectrum
#                    A=C*S*G_mS*P*RP_vert
#                else:
#                    RP=conically_avg_radiation_pattern(strike,dip,rake,azimuth,take_off_angle_mS,component_angle)
#                    RP=abs(RP)
#                    #And finally multiply everything together to get the subfault amplitude spectrum
#                    A=C*S*G_mS*P*RP
#
#                #Generate windowed time series
#                duration=1./fc_subfault+0.063*(dist/1000)
#                w=windowed_gaussian(duration,hf_dt,window_type='saragoni_hart')
#                #w=windowed_gaussian(3*duration,hf_dt,window_type='cua',ptime=Ppaths[0].path['time'][-1],stime=Spaths[0].path['time'][-1])
#
#                #Go to frequency domain, apply amplitude spectrum and ifft for final time series
#                hf_seis=apply_spectrum(w,A,f,hf_dt)
#
#                #What time after OT should this time series start at?
#                time_insert=mohoS.path['time'][-1]+onset_times[kfault]
#                #print 'ts = '+str(time_insert)+' , Td = '+str(duration)
#                #time_insert=Ppaths[0].path['time'][-1]
#                i=argmin(abs(t-time_insert))
#                j=i+len(hf_seis)
#
#                #Add seismogram
#                hf[i:j]=hf[i:j]+hf_seis
#
#                #Done, reset
#                mohoS=None
#        if kfault==0:
#            out=''' More:
#            fc_scale = %10.4f
#            subfaultM0 = %E
#            mu = %E
#            CS = %E
#            CP = %E
#            vr = %10.4f
#            dip_factor = %10.4f
#            fc_subfault = %10.4f
#            directS = %s
#            directP = %s
#            '''%(fc_scale,subfault_M0[kfault],mu,CS,CP,vr,dip_factor,fc_subfault,str(directS.time),str(directP.time))
#            print out
#            logfile.write(out)
#    logfile.close()
#Done
    tr.data = hf / 100  #convert to m/s**2
    #Add station location, event location, and first P-wave arrival time to SAC header
    tr.stats.update({
        'sac': {
            'stlo': sta_lon,
            'stla': sta_lat,
            'evlo': epicenter[0],
            'evla': epicenter[1],
            'evdp': epicenter[2],
            'dist': dist_in_km,
            'az': az,
            'baz': backaz,
            'mag': Mw
        }
    })  #,'idep':"ACC (m/s^2)" not sure why idep won't work
    #Return trace for writing to file
    #    print "Earliest P wave Comes at " + str(earliestP) + "after OT, from location " + str(fault[earliestP_kfault,1]) + ", " + str(fault[earliestP_kfault,2]) + ", " +str(fault[earliestP_kfault,3])
    return tr
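
# A minimal standalone sketch of the spectral building blocks used in the example above:
# the small-event omega-squared spectrum, the Frankel (1995) scaling operator and the
# kappa high-frequency decay. All numerical values below are illustrative assumptions,
# not values taken from the example.
import numpy as np

def subfault_amplitude_spectrum(f, M0, N, stress, dl, fc_subfault, kappa):
    '''Toy subfault amplitude spectrum (unnormalized, arbitrary units).'''
    omega = 2 * np.pi * f
    small_event_M0 = stress * dl**3 * 1e21  #moment of the "small event"
    fc_scale = M0 / (N * stress * dl**3 * 1e21)  #Frankel scaling factor
    S = small_event_M0 * (omega**2 / (1 + (f / fc_subfault)**2))
    frankel = fc_scale * ((fc_subfault**2 + f**2) / (fc_subfault**2 + fc_scale * f**2))
    P = np.exp(-np.pi * kappa * f)  #site kappa decay
    return S * frankel * P

f = np.logspace(-2, 1, 50)  #0.01 to 10 Hz
A = subfault_amplitude_spectrum(f, M0=1e26, N=100, stress=50, dl=5.0,
                                fc_subfault=0.2, kappa=0.04)
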
Example #28
0
def select_windows(data_trace,
                   synthetic_trace,
                   event_latitude,
                   event_longitude,
                   event_depth_in_km,
                   station_latitude,
                   station_longitude,
                   minimum_period,
                   maximum_period,
                   min_cc=0.10,
                   max_noise=0.10,
                   max_noise_window=0.4,
                   min_velocity=2.4,
                   threshold_shift=0.30,
                   threshold_correlation=0.75,
                   min_length_period=1.5,
                   min_peaks_troughs=2,
                   max_energy_ratio=10.0,
                   min_envelope_similarity=0.2,
                   verbose=False,
                   plot=False):
    """
    Window selection algorithm for picking windows suitable for misfit
    calculation based on phase differences.

    Returns a list of windows, which might be empty for various reasons.

    This function is really long and does a lot of things. For a more detailed
    description, please see the LASIF paper.

    :param data_trace: The data trace.
    :type data_trace: :class:`~obspy.core.trace.Trace`
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: :class:`~obspy.core.trace.Trace`
    :param event_latitude: The event latitude.
    :type event_latitude: float
    :param event_longitude: The event longitude.
    :type event_longitude: float
    :param event_depth_in_km: The event depth in km.
    :type event_depth_in_km: float
    :param station_latitude: The station latitude.
    :type station_latitude: float
    :param station_longitude: The station longitude.
    :type station_longitude: float
    :param minimum_period: The minimum period of the data in seconds.
    :type minimum_period: float
    :param maximum_period: The maximum period of the data in seconds.
    :type maximum_period: float
    :param min_cc: Minimum normalised correlation coefficient of the
        complete traces.
    :type min_cc: float
    :param max_noise: Maximum relative noise level for the whole trace.
        Measured from maximum amplitudes before and after the first arrival.
    :type max_noise: float
    :param max_noise_window: Maximum relative noise level for individual
        windows.
    :type max_noise_window: float
    :param min_velocity: All arrivals later than those corresponding to the
        threshold velocity [km/s] will be excluded.
    :type min_velocity: float
    :param threshold_shift: Maximum allowable time shift within a window,
        as a fraction of the minimum period.
    :type threshold_shift: float
    :param threshold_correlation: Minimum normalised correlation coefficient
        within a window.
    :type threshold_correlation: float
    :param min_length_period: Minimum length of the time windows relative to
        the minimum period.
    :type min_length_period: float
    :param min_peaks_troughs: Minimum number of extrema in an individual
        time window (excluding the edges).
    :type min_peaks_troughs: float
    :param max_energy_ratio: Maximum energy ratio between data and
        synthetics within a time window. Don't make this too small!
    :type max_energy_ratio: float
    :param min_envelope_similarity: The minimum similarity of the envelopes of
        both data and synthetics. This essentially assures that the
        amplitudes of data and synthetics cannot diverge too much within a
        window. It is a bit like the inverse of the ratio of both envelopes,
        so a value of 0.2 ensures that neither amplitude can be more than 5
        times larger than the other.
    :type min_envelope_similarity: float
    :param verbose: No output by default.
    :type verbose: bool
    :param plot: Create a plot of the algorithm while it does its work.
    :type plot: bool
    """
    # Shortcuts to frequently accessed variables.
    data_starttime = data_trace.stats.starttime
    data_delta = data_trace.stats.delta
    dt = data_trace.stats.delta
    npts = data_trace.stats.npts
    synth = synthetic_trace.data
    data = data_trace.data
    times = data_trace.times()

    # Fill cache if necessary.
    if not TAUPY_MODEL_CACHE:
        from obspy.taup import TauPyModel  # NOQA
        TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
    model = TAUPY_MODEL_CACHE["model"]

    # -------------------------------------------------------------------------
    # Geographical calculations and the time of the first arrival.
    # -------------------------------------------------------------------------
    dist_in_deg = geodetics.locations2degrees(station_latitude,
                                              station_longitude,
                                              event_latitude, event_longitude)
    dist_in_km = geodetics.calc_vincenty_inverse(
        station_latitude, station_longitude, event_latitude,
        event_longitude)[0] / 1000.0

    # Get only a couple of P phases which should be the first arrival
    # for every epicentral distance. It's quite a bit faster than calculating
    # the arrival times for every phase.
    # Assumes the first sample is the centroid time of the event.
    tts = model.get_travel_times(source_depth_in_km=event_depth_in_km,
                                 distance_in_degree=dist_in_deg,
                                 phase_list=["ttp"])
    # Sort just as a safety measure.
    tts = sorted(tts, key=lambda x: x.time)
    first_tt_arrival = tts[0].time

    # -------------------------------------------------------------------------
    # Window settings
    # -------------------------------------------------------------------------
    # Number of samples in the sliding window. Currently, the length of the
    # window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an uneven number; just to have a trivial midpoint
    # definition and one sample does not matter much in any case.
    window_length = int(round(float(2 * minimum_period) / dt))
    if not window_length % 2:
        window_length += 1
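    # (e.g. minimum_period = 30 s and dt = 0.5 s give window_length = 120 -> 121
    # samples; numbers for illustration only)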

    # Use a Hanning window. No particular reason for it but it's a well-behaved
    # window and has nice spectral properties.
    taper = np.hanning(window_length)

    # =========================================================================
    # check if whole seismograms are sufficiently correlated and estimate
    # noise level
    # =========================================================================

    # Overall Correlation coefficient.
    norm = np.sqrt(np.sum(data**2)) * np.sqrt(np.sum(synth**2))
    cc = np.sum(data * synth) / norm
    if verbose:
        _log_window_selection(data_trace.id,
                              "Correlation Coefficient: %.4f" % cc)

    # Estimate noise level from waveforms prior to the first arrival.
    idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
    idx_end = max(10, idx_end)
    idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
    idx_start = max(10, idx_start)

    if idx_start >= idx_end:
        idx_start = max(0, idx_end - 10)

    abs_data = np.abs(data)
    noise_absolute = abs_data[idx_start:idx_end].max()
    noise_relative = noise_absolute / abs_data.max()

    if verbose:
        _log_window_selection(data_trace.id,
                              "Absolute Noise Level: %e" % noise_absolute)
        _log_window_selection(data_trace.id,
                              "Relative Noise Level: %e" % noise_relative)

    # Basic global rejection criteria.
    accept_traces = True
    if (cc < min_cc) and (noise_relative > max_noise / 3.0):
        msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc)
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    if noise_relative > max_noise:
        msg = "Noise level %.3f is above threshold of %.3f" % (noise_relative,
                                                               max_noise)
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    # Calculate the envelope of both data and synthetics. This is to make sure
    # that the amplitude of both is not too different over time and is
    # used as another selector. Only calculated if the trace is generally
    # accepted as it is fairly slow.
    if accept_traces is True:
        data_env = obspy.signal.filter.envelope(data)
        synth_env = obspy.signal.filter.envelope(synth)

    # -------------------------------------------------------------------------
    # Initial Plot setup.
    # -------------------------------------------------------------------------
    # All the plot calls are interleaved. I realize this is really ugly but
    # the alternative would be to either have two functions (one with plots,
    # one without) or split the plotting function in various subfunctions,
    # neither of which are acceptable in my opinion. The impact on
    # performance is minimal if plotting is turned off: all imports are lazy
    # and a couple of conditionals are cheap.
    if plot:
        import matplotlib.pylab as plt  # NOQA
        import matplotlib.patheffects as PathEffects  # NOQA

        if accept_traces is True:
            plt.figure(figsize=(18, 12))
            plt.subplots_adjust(left=0.05,
                                bottom=0.05,
                                right=0.98,
                                top=0.95,
                                wspace=None,
                                hspace=0.0)
            grid = (31, 1)

            # Axes showing the data.
            data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
        else:
            # Only show one axis if the traces are not accepted.
            plt.figure(figsize=(18, 3))

        # Plot envelopes if needed.
        if accept_traces is True:
            plt.plot(times,
                     data_env,
                     color="black",
                     alpha=0.5,
                     lw=0.4,
                     label="data envelope")
            plt.plot(synthetic_trace.times(),
                     synth_env,
                     color="#e41a1c",
                     alpha=0.4,
                     lw=0.5,
                     label="synthetics envelope")

        plt.plot(times, data, color="black", label="data", lw=1.5)
        plt.plot(synthetic_trace.times(),
                 synth,
                 color="#e41a1c",
                 label="synthetics",
                 lw=1.5)

        # Symmetric around y axis.
        middle = data.mean()
        d_max, d_min = data.max(), data.min()
        r = max(d_max - middle, middle - d_min) * 1.1
        ylim = (middle - r, middle + r)
        xlim = (times[0], times[-1])
        plt.ylim(*ylim)
        plt.xlim(*xlim)

        offset = (xlim[1] - xlim[0]) * 0.005
        plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival + offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "first arrival",
                 verticalalignment="top",
                 horizontalalignment="left",
                 color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")
                 ])

        plt.vlines(first_tt_arrival - minimum_period / 2.0,
                   ylim[0],
                   ylim[1],
                   colors="#ff7f00",
                   lw=2)
        plt.text(first_tt_arrival - minimum_period / 2.0 - offset,
                 ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                 "first arrival - min period / 2",
                 verticalalignment="bottom",
                 horizontalalignment="right",
                 color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")
                 ])

        for velocity in [6, 5, 4, 3, min_velocity]:
            tt = dist_in_km / velocity
            plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
            if velocity == min_velocity:
                hal = "right"
                o_s = -1.0 * offset
            else:
                hal = "left"
                o_s = offset
            plt.text(tt + o_s,
                     ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                     str(velocity) + " km/s",
                     verticalalignment="bottom",
                     horizontalalignment=hal,
                     color="0.15")
        plt.vlines(dist_in_km / min_velocity + minimum_period / 2.0,
                   ylim[0],
                   ylim[1],
                   colors="gray",
                   lw=2)
        plt.text(dist_in_km / min_velocity + minimum_period / 2.0 - offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "min surface velocity + min period / 2",
                 verticalalignment="top",
                 horizontalalignment="right",
                 color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")
                 ])

        plt.hlines(noise_absolute,
                   xlim[0],
                   xlim[1],
                   linestyle="--",
                   color="gray")
        plt.hlines(-noise_absolute,
                   xlim[0],
                   xlim[1],
                   linestyle="--",
                   color="gray")
        plt.text(offset,
                 noise_absolute + (ylim[1] - ylim[0]) * 0.01,
                 "noise level",
                 verticalalignment="bottom",
                 horizontalalignment="left",
                 color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")
                 ])
        plt.legend(loc="lower right",
                   fancybox=True,
                   framealpha=0.5,
                   fontsize="small")
        plt.gca().xaxis.set_ticklabels([])

        # Plot the basic global information.
        ax = plt.gca()
        txt = (
            "Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f" %
            (cc, noise_absolute, noise_relative))
        ax.text(0.01,
                0.95,
                txt,
                transform=ax.transAxes,
                fontdict=dict(fontsize="small", ha='left', va='top'),
                bbox=dict(boxstyle="round", fc="w", alpha=0.8))
        plt.suptitle("Channel %s" % data_trace.id, fontsize="larger")

        # Show plot and return if not accepted.
        if accept_traces is not True:
            txt = "Rejected: %s" % (accept_traces)
            ax.text(0.99,
                    0.95,
                    txt,
                    transform=ax.transAxes,
                    fontdict=dict(fontsize="small", ha='right', va='top'),
                    bbox=dict(boxstyle="round", fc="red", alpha=1.0))
            plt.show()
    if accept_traces is not True:
        return []

    # Initialise masked arrays. The mask will be set to True where no
    # windows are chosen.
    time_windows = np.ma.ones(npts)
    time_windows.mask = False
    if plot:
        old_time_windows = time_windows.copy()

    # Elimination Stage 1: Eliminate everything earlier than half a period
    # before the first theoretical arrival and everything later than half a
    # period after the minimum-velocity arrival.
    min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt)
    max_idx = int(
        math.ceil((dist_in_km / min_velocity + minimum_period / 2.0) / dt))
    time_windows.mask[:min_idx + 1] = True
    time_windows.mask[max_idx:] = True
    if plot:
        plt.subplot2grid(grid, (8, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="TRAVELTIME ELIMINATION")
        old_time_windows = time_windows.copy()

    # -------------------------------------------------------------------------
    # Compute sliding time shifts and correlation coefficients for time
    # frames that passed the traveltime elimination stage.
    # -------------------------------------------------------------------------
    # Allocate arrays to collect the time dependent values.
    sliding_time_shift = np.ma.zeros(npts, dtype="float32")
    sliding_time_shift.mask = True
    max_cc_coeff = np.ma.zeros(npts, dtype="float32")
    max_cc_coeff.mask = True

    for start_idx, end_idx, midpoint_idx in _window_generator(
            npts, window_length):
        if not min_idx < midpoint_idx < max_idx:
            continue

        # Slice windows. Create a copy to be able to taper without affecting
        # the original time series.
        data_window = data[start_idx:end_idx].copy() * taper
        synthetic_window = \
            synth[start_idx: end_idx].copy() * taper

        # Elimination Stage 2: Skip windows that have essentially no energy
        # to avoid instabilities. No windows can be picked in these.
        if synthetic_window.ptp() < synth.ptp() * 0.001:
            time_windows.mask[midpoint_idx] = True
            continue

        # Calculate the time shift. Here this is defined as the shift of the
        # synthetics relative to the data. So a value of 2, for instance, means
        # that the synthetics are 2 timesteps later than the data.
        cc = np.correlate(data_window, synthetic_window, mode="full")

        time_shift = cc.argmax() - window_length + 1
        # Express the time shift in fraction of the minimum period.
        sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period

        # Normalized cross correlation.
        max_cc_value = cc.max() / np.sqrt(
            (synthetic_window**2).sum() * (data_window**2).sum())
        max_cc_coeff[midpoint_idx] = max_cc_value

    if plot:
        plt.subplot2grid(grid, (9, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="NO ENERGY IN CC WINDOW")
        # Axes with the CC coeffs
        plt.subplot2grid(grid, (15, 0), rowspan=4)
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.hlines(-threshold_shift,
                   xlim[0],
                   xlim[1],
                   color="gray",
                   linestyle="--")
        plt.hlines(threshold_shift,
                   xlim[0],
                   xlim[1],
                   color="gray",
                   linestyle="--")
        plt.text(5,
                 -threshold_shift - (2) * 0.03,
                 "threshold",
                 verticalalignment="top",
                 horizontalalignment="left",
                 color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")
                 ])
        plt.plot(times,
                 sliding_time_shift,
                 color="#377eb8",
                 label="Time shift in fraction of minimum period",
                 lw=1.5)
        ylim = plt.ylim()
        plt.yticks([-0.75, 0, 0.75])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
        plt.ylim(-1.0, 1.0)
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right",
                   fancybox=True,
                   framealpha=0.5,
                   fontsize="small")

        plt.subplot2grid(grid, (10, 0), rowspan=4)
        plt.hlines(threshold_correlation,
                   xlim[0],
                   xlim[1],
                   color="0.15",
                   linestyle="--")
        plt.hlines(1, xlim[0], xlim[1], color="lightgray")
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.text(5,
                 threshold_correlation + (1.4) * 0.01,
                 "threshold",
                 verticalalignment="bottom",
                 horizontalalignment="left",
                 color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")
                 ])
        plt.plot(times,
                 max_cc_coeff,
                 color="#4daf4a",
                 label="Maximum CC coefficient",
                 lw=1.5)
        plt.ylim(-0.2, 1.2)
        plt.yticks([0, 0.5, 1])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right",
                   fancybox=True,
                   framealpha=0.5,
                   fontsize="small")

    # Elimination Stage 3: Mark all areas where the normalized cross
    # correlation coefficient is under threshold_correlation as negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[max_cc_coeff < threshold_correlation] = True
    if plot:
        plt.subplot2grid(grid, (14, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="CORRELATION COEFF THRESHOLD ELIMINATION")

    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than threshold_shift times the dominant period as
    # negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = True
    if plot:
        plt.subplot2grid(grid, (19, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="TIME SHIFT THRESHOLD ELIMINATION")

    # Elimination Stage 5: Mark the area around every "travel time shift
    # jump" (based on the traveltime time difference) negative. The width of
    # the area is currently chosen to be a tenth of a dominant period to
    # each side.
    if plot:
        old_time_windows = time_windows.copy()
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
    indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer:index + sample_buffer] = True
    if plot:
        plt.subplot2grid(grid, (20, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="TIME SHIFT JUMPS ELIMINATION")

    # Clip both to avoid large numbers by division.
    stacked = np.vstack([
        np.ma.clip(synth_env,
                   synth_env.max() * min_envelope_similarity * 0.5,
                   synth_env.max()),
        np.ma.clip(data_env,
                   data_env.max() * min_envelope_similarity * 0.5,
                   data_env.max())
    ])
    # Ratio.
    ratio = stacked.min(axis=0) / stacked.max(axis=0)

    # Elimination Stage 6: Make sure the amplitudes of both don't vary too
    # much.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[ratio < min_envelope_similarity] = True
    if plot:
        plt.subplot2grid(grid, (25, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION")

    if plot:
        plt.subplot2grid(grid, (21, 0), rowspan=4)
        plt.hlines(min_envelope_similarity,
                   xlim[0],
                   xlim[1],
                   color="gray",
                   linestyle="--")
        plt.text(5,
                 min_envelope_similarity + (2) * 0.03,
                 "threshold",
                 verticalalignment="bottom",
                 horizontalalignment="left",
                 color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")
                 ])
        plt.plot(times,
                 ratio,
                 color="#9B59B6",
                 label="Envelope amplitude similarity",
                 lw=1.5)
        plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
        plt.ylim(0.05, 1.05)
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right",
                   fancybox=True,
                   framealpha=0.5,
                   fontsize="small")

    # First minimum window length elimination stage. This is cheap and if
    # not done it can easily destabilize the peak-and-trough marching stage
    # which would then have to deal with way more edge cases.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start:i.stop] = True
    if plot:
        plt.subplot2grid(grid, (26, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 1")

    # -------------------------------------------------------------------------
    # Peak and trough marching algorithm
    # -------------------------------------------------------------------------
    final_windows = []
    for i in flatnotmasked_contiguous(time_windows):
        # Cut respective windows.
        window_npts = i.stop - i.start
        synthetic_window = synth[i.start:i.stop]
        data_window = data[i.start:i.stop]

        # Find extrema in the data and the synthetics.
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)

        window_mask = np.ones(window_npts, dtype="bool")

        closest_peaks = find_closest(data_p, synth_p)
        diffs = np.diff(closest_peaks)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_p[idx - 1]
            else:
                start = 0
            if idx < (len(synth_p) - 1):
                end = synth_p[idx + 1]
            else:
                end = -1
            window_mask[start:end] = False

        closest_troughs = find_closest(data_t, synth_t)
        diffs = np.diff(closest_troughs)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_t[idx - 1]
            else:
                start = 0
            if idx < (len(synth_t) - 1):
                end = synth_t[idx + 1]
            else:
                end = -1
            window_mask[start:end] = False

        window_mask = np.ma.masked_array(window_mask, mask=window_mask)

        if window_mask.mask.all():
            continue

        for j in flatnotmasked_contiguous(window_mask):
            final_windows.append((i.start + j.start, i.start + j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (27, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="PEAK AND TROUGH MARCHING ELIMINATION")

    # Loop through all the time windows, remove windows not satisfying the
    # minimum number of peaks and troughs per window. Acts mainly as a
    # safety guard.
    old_time_windows = time_windows.copy()
    for i in flatnotmasked_contiguous(old_time_windows):
        synthetic_window = synth[i.start:i.stop]
        data_window = data[i.start:i.stop]
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)
        if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
                min_peaks_troughs:
            time_windows.mask[i.start:i.stop] = True
    if plot:
        plt.subplot2grid(grid, (28, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="PEAK/TROUGH COUNT ELIMINATION")

    # Second minimum window length elimination stage.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start:i.stop] = True
    if plot:
        plt.subplot2grid(grid, (29, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 2")

    # Final step, eliminating windows with little energy.
    final_windows = []
    for j in flatnotmasked_contiguous(time_windows):
        # Again assert a certain minimal length.
        if (j.stop - j.start) < min_length:
            continue

        # Compare the energy in the data window and the synthetic window.
        data_energy = (data[j.start:j.stop]**2).sum()
        synth_energy = (synth[j.start:j.stop]**2).sum()
        energies = sorted([data_energy, synth_energy])
        if energies[1] > max_energy_ratio * energies[0]:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to energy ratio between "
                    "data and synthetics.")
            continue

        # Check that amplitudes in the data are above the noise
        if noise_absolute / data[j.start: j.stop].ptp() > \
                max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to having no amplitude above "
                    "the signal-to-noise ratio.")
            continue
        final_windows.append((j.start, j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False

    if plot:
        plt.subplot2grid(grid, (30, 0), rowspan=1)
        _plot_mask(time_windows,
                   old_time_windows,
                   name="LITTLE ENERGY ELIMINATION")

    if verbose:
        _log_window_selection(
            data_trace.id, "Done, Selected %i window(s)" % len(final_windows))

    # Final step is to convert the index value windows to actual times.
    windows = []
    for start, stop in final_windows:
        start = data_starttime + start * data_delta
        stop = data_starttime + stop * data_delta
        windows.append((start, stop))

    if plot:
        # Plot the final windows to the data axes.
        import matplotlib.transforms as mtransforms  # NOQA
        ax = data_plot
        trans = mtransforms.blended_transform_factory(ax.transData,
                                                      ax.transAxes)
        for start, stop in final_windows:
            ax.fill_between([start * data_delta, stop * data_delta],
                            0,
                            1,
                            facecolor="#CDDC39",
                            alpha=0.5,
                            transform=trans)

        plt.show()

    return windows
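
# A hedged usage sketch for select_windows: the file names, coordinates and periods below
# are assumptions for illustration, and the data and synthetic traces are expected to
# already be preprocessed to a common sampling rate and band.
import obspy

data_trace = obspy.read("preprocessed_data.mseed")[0]    #assumed file name
synthetic_trace = obspy.read("synthetic_data.mseed")[0]  #assumed file name
windows = select_windows(data_trace, synthetic_trace,
                         event_latitude=37.0, event_longitude=-118.5,
                         event_depth_in_km=10.0,
                         station_latitude=36.0, station_longitude=-117.0,
                         minimum_period=30.0, maximum_period=100.0,
                         verbose=True, plot=False)
for start, stop in windows:
    print(start, stop)
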
Example #29
0
dip = [60, 70, 80]
rake = [-90, 0, 90]

Pia173a = []
Sia173a = []
Pia235b = []
Sia235b = []
Pia325a = []
Sia325a = []

# Mars:
radius = 3389.5

for mod in model_ls:
    mars = TauPyModel(model=mod)

    #--model velocity input---

    if mod == 'DWAK':
        DWAK_depth = [45, 15, 5]
        Pia173a = []
        Sia173a = []
        Pia235b = []
        Sia235b = []
        Pia325a = []
        Sia325a = []
        for depth in DWAK_depth:
            if depth <= 66 and depth > 10:
                Pvelz = 5.90405
                Svelz = 3.30798
Example #30
0
def process_streams(streams, origin, config=None, old_streams=None):
    """
    Run processing steps from the config file.

    This method looks in the 'processing' config section and loops over those
    steps and hands off the config options to the appropriate processing method.
    Streams that fail any of the tests are kept in the StreamCollection, but
    the parameter 'passed_checks' is set to False and subsequent processing
    steps are not applied once a check has failed.

    Args:
        streams (StreamCollection):
            A StreamCollection object of unprocessed streams.
        origin (ScalarEvent):
            ScalarEvent object.
        config (dict):
            Configuration dictionary (or None). See get_config().
        old_streams (StreamCollection):
            A StreamCollection object of previously processed streams that contain
            manually reviewed information. None if not reprocessing.

    Returns:
        A StreamCollection object.
    """

    if not isinstance(streams, (StreamCollection, StreamArray)):
        raise ValueError(
            "streams must be a StreamCollection or StreamArray instance.")

    if config is None:
        config = get_config()

    event_time = origin.time
    event_lon = origin.longitude
    event_lat = origin.latitude

    # -------------------------------------------------------------------------
    # Compute a travel-time matrix for interpolation later in the
    # trim_multiple events step
    if any("trim_multiple_events" in step for step in config["processing"]):
        travel_time_df, catalog = create_travel_time_dataframe(
            streams, **config["travel_time"])

    window_conf = config["windows"]
    model = TauPyModel(config["pickers"]["travel_time"]["model"])

    for st in streams:
        logging.debug(f"Checking stream {st.get_id()}...")
        # Estimate noise/signal split time
        st = signal_split(st,
                          origin,
                          model,
                          picker_config=config["pickers"],
                          config=config)

        # Estimate end of signal
        end_conf = window_conf["signal_end"]
        event_mag = origin.magnitude
        st = signal_end(
            st,
            event_time=event_time,
            event_lon=event_lon,
            event_lat=event_lat,
            event_mag=event_mag,
            **end_conf,
        )
        wcheck_conf = window_conf["window_checks"]
        if wcheck_conf["do_check"]:
            st = window_checks(
                st,
                min_noise_duration=wcheck_conf["min_noise_duration"],
                min_signal_duration=wcheck_conf["min_signal_duration"],
            )

    # -------------------------------------------------------------------------
    # Begin processing steps
    processing_steps = config["processing"]

    # Loop over streams
    for i, stream in enumerate(streams):
        logging.info(f"Stream: {stream.get_id()}")
        # Check if we are reprocessing (indicated by presence of old_streams)
        if old_streams is not None:
            old_stream = old_streams[i]
            for j in range(len(old_stream)):
                tr_old = old_stream[j]
                # Check if old_streams have review parameters because it is not
                # guaranteed
                if tr_old.hasParameter("review"):
                    review_dict = tr_old.getParameter("review")
                    # Transfer review parameter from old stream to new
                    stream[j].setParameter("review", review_dict)
                    # Was it failed via manual review?
                    if "accepted" in review_dict:
                        if not review_dict["accepted"]:
                            stream[j].fail("Manual review")

        for processing_step_dict in processing_steps:

            key_list = list(processing_step_dict.keys())
            if len(key_list) != 1:
                raise ValueError(
                    "Each processing step must contain exactly one key.")
            step_name = key_list[0]

            logging.debug(f"Processing step: {step_name}")
            step_args = processing_step_dict[step_name]
            # Using globals doesn't seem like a great solution here, but it
            # works.
            if step_name not in globals():
                raise ValueError(f"Processing step {step_name} is not valid.")

            # Origin is required by some steps and has to be handled specially.
            # There must be a better solution for this...
            if step_name in REQ_ORIGIN:
                step_args["origin"] = origin
            if step_name == "trim_multiple_events":
                step_args["catalog"] = catalog
                step_args["travel_time_df"] = travel_time_df
            if step_name == "compute_snr":
                step_args["mag"] = origin.magnitude

            if step_args is None:
                stream = globals()[step_name](stream, config)
            else:
                stream = globals()[step_name](stream,
                                              **step_args,
                                              config=config)

    # -------------------------------------------------------------------------
    # Begin colocated instrument selection
    if "colocated" in config:
        colocated_conf = config["colocated"]
        if isinstance(streams, StreamCollection):
            streams.select_colocated(**colocated_conf, origin=origin)

    for st in streams:
        for tr in st:
            tr.stats.standard.process_level = PROCESS_LEVELS["V2"]

    logging.info("Finished processing streams.")
    return streams
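
# A minimal self-contained sketch of the config-driven dispatch pattern used above: each
# processing step is a single-key dict mapping a step name to its keyword arguments, and
# the step function is looked up by name at run time. The step names and arguments below
# are illustrative assumptions, not real gmprocess processing steps.
def detrend(stream, method="linear", config=None):
    print(f"detrend(method={method})")
    return stream

def taper(stream, max_percentage=0.05, config=None):
    print(f"taper(max_percentage={max_percentage})")
    return stream

processing_steps = [{"detrend": {"method": "linear"}},
                    {"taper": {"max_percentage": 0.05}}]

stream = object()  #stand-in for a real StationStream
for step in processing_steps:
    (step_name, step_args), = step.items()
    if step_name not in globals():
        raise ValueError(f"Processing step {step_name} is not valid.")
    if step_args is None:
        stream = globals()[step_name](stream, config=None)
    else:
        stream = globals()[step_name](stream, **step_args, config=None)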