Example #1
def Amp_pick_sfile(sfile, datapath, respdir, chans=['Z'], var_wintype=True, \
                   winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,\
                   highcut=20.0, corners=4):
    """
    Function to read information from a SEISAN s-file, load the data and the
    picks, cut the data for the channels given around the S-window, simulate
    a Wood Anderson seismometer, then pick the maximum peak-to-trough
    amplitude.

    Output will be put into a mag_calc.out file which will be in full S-file
    format and can be copied to a REA database.

    :type sfile: String
    :param sfile: Path to the SEISAN S-file to read picks and wavefile names from
    :type datapath: String
    :param datapath: Path to the waveform files - usually the path to the WAV directory
    :type respdir: String
    :param respdir: Path to the response information directory
    :type chans: List of strings
    :param chans: List of the channels to pick on, defaults to ['Z'] - should
                just be the orientations, e.g. Z,1,2,N,E
    :type var_wintype: Bool
    :param var_wintype: If True, the winlen will be
                    multiplied by the P-S time if both P and S picks are
                    available, otherwise it will be multiplied by the hypocentral
                    distance*0.34 - derived using a p-s ratio of 1.68 and
                    S-velocity of 1.5km/s to give a large window, defaults to True
    :type winlen: Float
    :param winlen: Length of window, see above parameter; if var_wintype is False
                    then this will be in seconds, otherwise it is the multiplier
                    to the p-s time, defaults to 0.9
    :type pre_pick: Float
    :param pre_pick: Time before the s-pick to start the cut window, defaults
                    to 0.2
    :type pre_filt: Bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: Float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: Float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: Int
    :param corners: Number of corners to use in the pre-filter
    """
    # Hardwire a p-s multiplier of hypocentral distance based on p-s ratio of
    # 1.68 and an S-velocity of 1.5km/s, deliberately chosen to be quite slow
    ps_multiplier = 0.34
    from eqcorrscan.utils import Sfile_util
    from obspy import read
    from scipy.signal import iirfilter
    from obspy.signal.invsim import paz2AmpValueOfFreqResp
    import warnings
    # First we need to work out what stations have what picks
    picks=Sfile_util.readpicks(sfile)
    # Convert these picks into a lists
    stations=[] # List of stations
    channels=[] # List of channels
    picktimes=[] # List of pick times
    picktypes=[] # List of pick types
    distances=[] # List of hypocentral distances
    picks_out=[]
    for pick in picks:
        if pick.phase in ['P','S']:
            picks_out.append(pick) # Need to be able to remove this if there
                                   # isn't data for a station!
            stations.append(pick.station)
            channels.append(pick.channel)
            picktimes.append(pick.time)
            picktypes.append(pick.phase)
            distances.append(pick.distance)
    # Read in waveforms
    stream=read(datapath+'/'+Sfile_util.readwavename(sfile)[0])
    if len(Sfile_util.readwavename(sfile)) > 1:
        for wavfile in Sfile_util.readwavename(sfile):
            stream+=read(datapath+'/'+wavfile)
    stream.merge() # merge the data, just in case!
    # For each station cut the window
    uniq_stas=list(set(stations))
    for sta in uniq_stas:
        for chan in chans:
            print 'Working on '+sta+' '+chan
            tr=stream.select(station=sta, channel='*'+chan)
            if not tr:
                # Remove picks from file
                # picks_out=[picks_out[i] for i in xrange(len(picks))\
                           # if picks_out[i].station+picks_out[i].channel != \
                           # sta+chan]
                warnings.warn('There is no station and channel match in the wavefile!')
                break
            else:
                tr=tr[0]
            # Apply the pre-filter
            if pre_filt:
                try:
                    tr.detrend('simple')
                except:
                    dummy=tr.split()
                    dummy.detrend('simple')
                    tr=dummy.merge()[0]
                tr.filter('bandpass',freqmin=lowcut, freqmax=highcut,\
                             corners=corners)
            sta_picks=[i for i in xrange(len(stations)) \
                           if stations[i]==sta]
            hypo_dist=picks[sta_picks[0]].distance
            CAZ=picks[sta_picks[0]].CAZ
            if var_wintype:
                if 'S' in [picktypes[i] for i in sta_picks] and\
                   'P' in [picktypes[i] for i in sta_picks]:
                    # If there is an S-pick we can use this :D
                    S_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='S']
                    S_pick=min(S_pick)
                    P_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='P']
                    P_pick=min(P_pick)
                    try:
                        tr.trim(starttime=S_pick-pre_pick, \
                                endtime=S_pick+(S_pick-P_pick)*winlen)
                    except:
                        break
                elif 'S' in [picktypes[i] for i in sta_picks]:
                    S_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='S']
                    S_pick=min(S_pick)
                    P_modelled=S_pick-hypo_dist*ps_multiplier
                    try:
                        tr.trim(starttime=S_pick-pre_pick, \
                                endtime=S_pick+(S_pick-P_modelled)*winlen)
                    except:
                        break
                else:
                    # In this case we only have a P pick
                    P_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='P']
                    P_pick=min(P_pick)
                    S_modelled=P_pick+hypo_dist*ps_multiplier
                    try:
                        tr.trim(starttime=S_modelled-pre_pick, \
                                endtime=S_modelled+(S_modelled-P_pick)*winlen)
                    except:
                        break
                # Work out the window length based on p-s time or distance
            elif 'S' in [picktypes[i] for i in sta_picks]:
                # If the window is fixed we still need to find the start time,
                # which can be based either on the S-pick (this elif), or
                # on the hypocentral distance and the P-pick

                # Take the minimum S-pick time if more than one S-pick is available
                S_pick=[picktimes[i] for i in sta_picks \
                           if picktypes[i]=='S']
                S_pick=min(S_pick)
                try:
                    tr.trim(starttime=S_pick-pre_pick, endtime=S_pick+winlen)
                except:
                    break
            else:
                # In this case, there is no S-pick and the window length is fixed
                # We need to calculate an expected S_pick based on the hypocentral
                # distance, this will be quite hand-wavey as we are not using
                # any kind of velocity model.
                P_pick=[picktimes[i] for i in sta_picks \
                           if picktypes[i]=='P']
                P_pick=min(P_pick)
                hypo_dist=[distances[i] for i in sta_picks\
                           if picktypes[i]=='P'][0]
                S_modelled=P_pick+hypo_dist*ps_multiplier
                try:
                    tr.trim(starttime=S_modelled-pre_pick, \
                            endtime=S_modelled+winlen)
                except:
                    break
            # Find the response information
            resp_info=_find_resp(tr.stats.station, tr.stats.channel,\
                           tr.stats.network, tr.stats.starttime, tr.stats.delta,\
                                 respdir)
            PAZ=[]
            seedresp=[]
            if resp_info and 'gain' in resp_info:
                PAZ=resp_info
            elif resp_info:
                seedresp=resp_info
            # Simulate a Wood Anderson Seismograph
            if PAZ and len(tr.data) > 10: # Set ten data points to be the minimum to pass
                tr=_sim_WA(tr, PAZ, None, 10)
            elif seedresp and len(tr.data) > 10:
                tr=_sim_WA(tr, None, seedresp, 10)
            elif len(tr.data) > 10:
                warnings.warn('No PAZ for '+tr.stats.station+' '+\
                                 tr.stats.channel+' at time: '+\
                                 str(tr.stats.starttime))
                continue
            if len(tr.data) <= 10:
                # Should remove the P and S picks if len(tr.data)==0
                warnings.warn('No data found for: '+tr.stats.station)
                # print 'No data in miniseed file for '+tr.stats.station+\
                              # ' removing picks'
                # picks_out=[picks_out[i] for i in xrange(len(picks_out))\
                           # if i not in sta_picks]
                break
            # Get the amplitude
            amplitude, period, delay= _max_p2t(tr.data, tr.stats.delta)
            if amplitude==0.0:
                break
            print 'Amplitude picked: '+str(amplitude)
            # Note, amplitude should be in meters at the moment!
            # Remove the pre-filter response
            if pre_filt:
                # Generate poles and zeros for the filter we used earlier - this
                # is how the filter is designed in the convenience methods of
                # filtering in obspy.
                z, p, k=iirfilter(corners, [lowcut/(0.5*tr.stats.sampling_rate),\
                                            highcut/(0.5*tr.stats.sampling_rate)],\
                                  btype='band', ftype='butter', output='zpk')
                filt_paz={'poles': list(p),
                          'zeros': list(z),
                          'gain': k,
                          'sensitivity':  1.0}
                amplitude /= (paz2AmpValueOfFreqResp(filt_paz, 1/period) * \
                              filt_paz['sensitivity'])
            # Convert amplitude to mm
            if PAZ: # Divide by Gain to get to nm (returns pm? 10^-12)
                # amplitude *=PAZ['gain']
                amplitude /= 1000
            if seedresp: # Seedresp method returns mm
                amplitude *= 1000000
            # Write out the half amplitude, approximately the peak amplitude as
            # used directly in magnitude calculations
            # Page 343 of Seisan manual:
            #   Amplitude (Zero-Peak) in units of nm, nm/s, nm/s^2 or counts
            amplitude *= 0.5
            # Generate a PICK type object for this pick
            picks_out.append(Sfile_util.PICK(station=tr.stats.station,
                                         channel=tr.stats.channel,
                                         impulsivity=' ',
                                         phase='IAML',
                                         weight='', polarity=' ',
                                         time=tr.stats.starttime+delay,
                                         coda=999, amplitude=amplitude,
                                         peri=period, azimuth=float('NaN'),
                                         velocity=float('NaN'), AIN=999, SNR='',
                                         azimuthres=999, timeres=float('NaN'),
                                         finalweight=999, distance=hypo_dist,
                                         CAZ=CAZ))
    # Copy the header from the sfile to a new local S-file
    fin=open(sfile,'r')
    fout=open('mag_calc.out','w')
    for line in fin:
        if not line[79]=='7':
            fout.write(line)
        else:
            fout.write(line)
            break
    fin.close()
    fout.close()
    # Write picks out to new s-file
    for pick in picks_out:
        print pick
    Sfile_util.populateSfile('mag_calc.out',picks_out)
    return picks
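
A minimal usage sketch for this example. The module path (eqcorrscan.utils.mag_calc) and the REA/WAV/CAL paths below are assumptions for illustration, not taken from the code above.

from eqcorrscan.utils.mag_calc import Amp_pick_sfile  # assumed module location

sfile = 'REA/TEST_/2015/08/15-0101-00L.S201508'  # hypothetical S-file
datapath = 'WAV/TEST_'                           # hypothetical WAV directory
respdir = 'CAL'                                  # hypothetical response directory

# Pick amplitudes on all three components with the default variable window
# and the 1-20 Hz pre-filter described in the docstring.
picks = Amp_pick_sfile(sfile, datapath, respdir, chans=['Z', 'N', 'E'],
                       var_wintype=True, winlen=0.9, pre_pick=0.2,
                       pre_filt=True, lowcut=1.0, highcut=20.0, corners=4)
# The amplitude picks are also written to 'mag_calc.out' in S-file format.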
Example #2
def Amp_pick_sfile(sfile,
                   datapath,
                   respdir,
                   chans=['Z'],
                   var_wintype=True,
                   winlen=0.9,
                   pre_pick=0.2,
                   pre_filt=True,
                   lowcut=1.0,
                   highcut=20.0,
                   corners=4):
    """
    Function to read information from a SEISAN s-file, load the data and the \
    picks, cut the data for the channels given around the S-window, simulate \
    a Wood Anderson seismometer, then pick the maximum peak-to-trough \
    amplitude.

    Output will be put into a mag_calc.out file which will be in full S-file \
    format and can be copied to a REA database.

    :type sfile: string
    :param sfile: Path to the SEISAN S-file to read picks and wavefile names \
        from
    :type datapath: string
    :param datapath: Path to the waveform files - usually the path to the WAV \
        directory
    :type respdir: string
    :param respdir: Path to the response information directory
    :type chans: List of strings
    :param chans: List of the channels to pick on, defaults to ['Z'] - should \
        just be the orientations, e.g. Z,1,2,N,E
    :type var_wintype: bool
    :param var_wintype: If True, the winlen will be \
        multiplied by the P-S time if both P and S picks are \
        available, otherwise it will be multiplied by the \
        hypocentral distance*0.34 - derived using a p-s ratio of \
        1.68 and S-velocity of 1.5km/s to give a large window, \
        defaults to True
    :type winlen: float
    :param winlen: Length of window, see above parameter, if var_wintype is \
        False then this will be in seconds, otherwise it is the \
        multiplier to the p-s time, defaults to 0.9.
    :type pre_pick: float
    :param pre_pick: Time before the s-pick to start the cut window, defaults \
        to 0.2
    :type pre_filt: bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: int
    :param corners: Number of corners to use in the pre-filter
    """
    # Hardwire a p-s multiplier of hypocentral distance based on p-s ratio of
    # 1.68 and an S-velocity of 1.5km/s, deliberately chosen to be quite slow
    ps_multiplier = 0.34
    from eqcorrscan.utils import Sfile_util
    from obspy import read
    from scipy.signal import iirfilter
    from obspy.signal.invsim import paz2AmpValueOfFreqResp
    import warnings
    # First we need to work out what stations have what picks
    event = Sfile_util.readpicks(sfile)[0]
    # Convert these picks into a lists
    stations = []  # List of stations
    channels = []  # List of channels
    picktimes = []  # List of pick times
    picktypes = []  # List of pick types
    distances = []  # List of hypocentral distances
    picks_out = []
    for pick in event.picks:
        if pick.phase_hint in ['P', 'S']:
            picks_out.append(pick)  # Need to be able to remove this if there
            # isn't data for a station!
            stations.append(pick.waveform_id.station_code)
            channels.append(pick.waveform_id.channel_code)
            picktimes.append(pick.time)
            picktypes.append(pick.phase_hint)
            arrival = [
                arrival for arrival in event.origins[0].arrivals
                if arrival.pick_id == pick.resource_id
            ][0]  # The comprehension returns a list, take the matching arrival
            distances.append(arrival.distance)
    # Read in waveforms
    stream = read(datapath + '/' + Sfile_util.readwavename(sfile)[0])
    if len(Sfile_util.readwavename(sfile)) > 1:
        for wavfile in Sfile_util.readwavename(sfile):
            stream += read(datapath + '/' + wavfile)
    stream.merge()  # merge the data, just in case!
    # For each station cut the window
    uniq_stas = list(set(stations))
    del arrival
    for sta in uniq_stas:
        for chan in chans:
            print 'Working on ' + sta + ' ' + chan
            tr = stream.select(station=sta, channel='*' + chan)
            if not tr:
                # Remove picks from file
                # picks_out=[picks_out[i] for i in xrange(len(picks))\
                # if picks_out[i].station+picks_out[i].channel != \
                # sta+chan]
                warnings.warn('There is no station and channel match in the ' +
                              'wavefile!')
                break
            else:
                tr = tr[0]
            # Apply the pre-filter
            if pre_filt:
                try:
                    tr.detrend('simple')
                except:
                    dummy = tr.split()
                    dummy.detrend('simple')
                    tr = dummy.merge()[0]
                tr.filter('bandpass',
                          freqmin=lowcut,
                          freqmax=highcut,
                          corners=corners)
            sta_picks = [
                i for i in xrange(len(stations)) if stations[i] == sta
            ]
            pick_id = event.picks[sta_picks[0]].resource_id
            arrival = [
                arrival for arrival in event.origins[0].arrivals
                if arrival.pick_id == pick_id
            ][0]  # Take the matching arrival out of the list
            hypo_dist = arrival.distance
            CAZ = arrival.azimuth
            if var_wintype:
                if 'S' in [picktypes[i] for i in sta_picks] and\
                   'P' in [picktypes[i] for i in sta_picks]:
                    # If there is an S-pick we can use this :D
                    S_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'S'
                    ]
                    S_pick = min(S_pick)
                    P_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'P'
                    ]
                    P_pick = min(P_pick)
                    try:
                        tr.trim(starttime=S_pick - pre_pick,
                                endtime=S_pick + (S_pick - P_pick) * winlen)
                    except:
                        break
                elif 'S' in [picktypes[i] for i in sta_picks]:
                    S_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'S'
                    ]
                    S_pick = min(S_pick)
                    P_modelled = S_pick - hypo_dist * ps_multiplier
                    try:
                        tr.trim(starttime=S_pick - pre_pick,
                                endtime=S_pick +
                                (S_pick - P_modelled) * winlen)
                    except:
                        break
                else:
                    # In this case we only have a P pick
                    P_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'P'
                    ]
                    P_pick = min(P_pick)
                    S_modelled = P_pick + hypo_dist * ps_multiplier
                    try:
                        tr.trim(starttime=S_modelled - pre_pick,
                                endtime=S_modelled +
                                (S_modelled - P_pick) * winlen)
                    except:
                        break
                # Work out the window length based on p-s time or distance
            elif 'S' in [picktypes[i] for i in sta_picks]:
                # If the window is fixed we still need to find the start time,
                # which can be based either on the S-pick (this elif), or
                # on the hypocentral distance and the P-pick

                # Take the minimum S-pick time if more than one S-pick is
                # available
                S_pick = [
                    picktimes[i] for i in sta_picks if picktypes[i] == 'S'
                ]
                S_pick = min(S_pick)
                try:
                    tr.trim(starttime=S_pick - pre_pick,
                            endtime=S_pick + winlen)
                except:
                    break
            else:
                # In this case, there is no S-pick and the window length is
                # fixed we need to calculate an expected S_pick based on the
                # hypocentral distance, this will be quite hand-wavey as we
                # are not using any kind of velocity model.
                P_pick = [
                    picktimes[i] for i in sta_picks if picktypes[i] == 'P'
                ]
                P_pick = min(P_pick)
                hypo_dist = [
                    distances[i] for i in sta_picks if picktypes[i] == 'P'
                ][0]
                S_modelled = P_pick + hypo_dist * ps_multiplier
                try:
                    tr.trim(starttime=S_modelled - pre_pick,
                            endtime=S_modelled + winlen)
                except:
                    break
            # Find the response information
            resp_info = _find_resp(tr.stats.station, tr.stats.channel,
                                   tr.stats.network, tr.stats.starttime,
                                   tr.stats.delta, respdir)
            PAZ = []
            seedresp = []
            if resp_info and 'gain' in resp_info:
                PAZ = resp_info
            elif resp_info:
                seedresp = resp_info
            # Simulate a Wood Anderson Seismograph
            if PAZ and len(tr.data) > 10:
                # Set ten data points to be the minimum to pass
                tr = _sim_WA(tr, PAZ, None, 10)
            elif seedresp and len(tr.data) > 10:
                tr = _sim_WA(tr, None, seedresp, 10)
            elif len(tr.data) > 10:
                warnings.warn('No PAZ for ' + tr.stats.station + ' ' +
                              tr.stats.channel + ' at time: ' +
                              str(tr.stats.starttime))
                continue
            if len(tr.data) <= 10:
                # Should remove the P and S picks if len(tr.data)==0
                warnings.warn('No data found for: ' + tr.stats.station)
                # print 'No data in miniseed file for '+tr.stats.station+\
                # ' removing picks'
                # picks_out=[picks_out[i] for i in xrange(len(picks_out))\
                # if i not in sta_picks]
                break
            # Get the amplitude
            amplitude, period, delay = _max_p2t(tr.data, tr.stats.delta)
            if amplitude == 0.0:
                break
            print 'Amplitude picked: ' + str(amplitude)
            # Note, amplitude should be in meters at the moment!
            # Remove the pre-filter response
            if pre_filt:
                # Generate poles and zeros for the filter we used earlier: this
                # is how the filter is designed in the convenience methods of
                # filtering in obspy.
                z, p, k = iirfilter(corners, [
                    lowcut / (0.5 * tr.stats.sampling_rate), highcut /
                    (0.5 * tr.stats.sampling_rate)
                ],
                                    btype='band',
                                    ftype='butter',
                                    output='zpk')
                filt_paz = {
                    'poles': list(p),
                    'zeros': list(z),
                    'gain': k,
                    'sensitivity': 1.0
                }
                amplitude /= (paz2AmpValueOfFreqResp(filt_paz, 1 / period) *
                              filt_paz['sensitivity'])
            # Convert amplitude to mm
            if PAZ:  # Divide by Gain to get to nm (returns pm? 10^-12)
                # amplitude *=PAZ['gain']
                amplitude /= 1000
            if seedresp:  # Seedresp method returns mm
                amplitude *= 1000000
            # Write out the half amplitude, approximately the peak amplitude as
            # used directly in magnitude calculations
            # Page 343 of Seisan manual:
            #   Amplitude (Zero-Peak) in units of nm, nm/s, nm/s^2 or counts
            amplitude *= 0.5
            # Generate a PICK type object for this pick
            picks_out.append(
                Sfile_util.PICK(station=tr.stats.station,
                                channel=tr.stats.channel,
                                impulsivity=' ',
                                phase='IAML',
                                weight='',
                                polarity=' ',
                                time=tr.stats.starttime + delay,
                                coda=999,
                                amplitude=amplitude,
                                peri=period,
                                azimuth=float('NaN'),
                                velocity=float('NaN'),
                                AIN=999,
                                SNR='',
                                azimuthres=999,
                                timeres=float('NaN'),
                                finalweight=999,
                                distance=hypo_dist,
                                CAZ=CAZ))
    # Copy the header from the sfile to a new local S-file
    fin = open(sfile, 'r')
    fout = open('mag_calc.out', 'w')
    for line in fin:
        if not line[79] == '7':
            fout.write(line)
        else:
            fout.write(line)
            break
    fin.close()
    for pick in picks_out:
        # Note this uses the legacy pick class; write its string representation
        fout.write(str(pick))
    fout.close()
    # Write picks out to new s-file
    for pick in picks_out:
        print pick
    # Sfile_util.populateSfile('mag_calc.out', picks_out)
    return picks_out
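
The pre-filter gain correction used in both versions above can be exercised on its own. The sketch below repeats that step with made-up numbers for the sampling rate, measured period and raw amplitude; it relies only on the same scipy and obspy calls already imported in the function.

from scipy.signal import iirfilter
from obspy.signal.invsim import paz2AmpValueOfFreqResp

sampling_rate = 100.0   # Hz, assumed
lowcut, highcut, corners = 1.0, 20.0, 4
period = 0.5            # s, as returned by _max_p2t (assumed value)
amplitude = 3.2e-6      # m, raw Wood-Anderson amplitude (assumed value)

# Design the same Butterworth band-pass used by Trace.filter, but keep it as
# poles and zeros so its amplitude response can be evaluated.
z, p, k = iirfilter(corners, [lowcut / (0.5 * sampling_rate),
                              highcut / (0.5 * sampling_rate)],
                    btype='band', ftype='butter', output='zpk')
filt_paz = {'poles': list(p), 'zeros': list(z), 'gain': k, 'sensitivity': 1.0}
# Divide out the filter's gain at the measured frequency (1 / period Hz).
amplitude /= (paz2AmpValueOfFreqResp(filt_paz, 1.0 / period) *
              filt_paz['sensitivity'])
print(amplitude)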
Example #3
def write_correlations(event_list, wavbase, extract_len, pre_pick, shift_len,\
                       lowcut=1.0, highcut=10.0, max_sep=4, min_link=8, \
                       coh_thresh=0.0):
    """
    Function to write a dt.cc file for hypoDD input - takes an input list of
    events and computes pick refinements by correlation.

    Note that this is **NOT** fast.

    :type event_list: List of tuple
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: string
    :param wavbase: Path to the seisan wave directory in which the wavefiles
                    referenced by the S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type coh_thresh: float
    :param coh_thresh: Minimum squared cross-correlation for a phase pair to
                       be written out - default=0.0
    """
    from obspy.signal.cross_correlation import xcorrPickCorrection
    import matplotlib.pyplot as plt
    from obspy import read
    from eqcorrscan.utils.mag_conv import dist_calc
    from eqcorrscan.utils import Sfile_util
    import glob
    import os
    corr_list=[]
    f=open('dt.cc','w')
    for i in xrange(len(event_list)):
        master_sfile=event_list[i][1]
        master_event_id=event_list[i][0]
        master_picks=Sfile_util.readpicks(master_sfile)
        master_ori_time=Sfile_util.readheader(master_sfile).time
        master_location=(Sfile_util.readheader(master_sfile).latitude,\
                         Sfile_util.readheader(master_sfile).longitude,\
                         Sfile_util.readheader(master_sfile).depth)
        master_wavefiles=Sfile_util.readwavename(master_sfile)
        masterpath=glob.glob(wavbase+os.sep+'????'+os.sep+'??'+os.sep+master_wavefiles[0])
        if masterpath:
            masterstream=read(masterpath[0])
        if len(master_wavefiles)>1:
            for wavefile in master_wavefiles:
                wavepath=glob.glob(wavbase+os.sep+'*'+os.sep+'*'+os.sep+wavefile)
                if wavepath:
                    masterstream+=read(wavepath[0])
                else:
                    raise IOError("Couldn't find wavefile")
        for j in xrange(i+1,len(event_list)):
            # Use this tactic to only output unique event pairings
            slave_sfile=event_list[j][1]
            slave_event_id=event_list[j][0]
            slave_wavefiles=Sfile_util.readwavename(slave_sfile)
            try:
                slavestream=read(wavbase+'/*/*/'+slave_wavefiles[0])
            except:
                raise IOError('No wavefile found: '+slave_wavefiles[0]+' '+slave_sfile)
            if len(slave_wavefiles)>1:
                for wavefile in slave_wavefiles:
                    slavestream+=read(wavbase+'/*/*/'+wavefile)
            # Write out the header line
            event_text='#'+str(master_event_id).rjust(10)+\
                    str(slave_event_id).rjust(10)+' 0.0   \n'
            slave_picks=Sfile_util.readpicks(slave_sfile)
            slave_ori_time=Sfile_util.readheader(slave_sfile).time
            slave_location=(Sfile_util.readheader(slave_sfile).latitude,\
                         Sfile_util.readheader(slave_sfile).longitude,\
                         Sfile_util.readheader(slave_sfile).depth)
            if dist_calc(master_location, slave_location) > max_sep:
                break
            links=0
            phases=0
            for pick in master_picks:
                if pick.phase not in ['P','S']:
                    continue # Only use P and S picks, not amplitude or 'other'
                # Find station, phase pairs
                slave_matches=[p for p in slave_picks if p.station==pick.station\
                               and p.phase==pick.phase]
                if masterstream.select(station=pick.station, \
                                       channel='*'+pick.channel[-1]):
                    mastertr=masterstream.select(station=pick.station, \
                                                 channel='*'+pick.channel[-1])[0]
                else:
                    print 'No waveform data for '+pick.station+'.'+pick.channel
                    print pick.station+'.'+pick.channel+' '+slave_sfile+' '+master_sfile
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(station=slave_pick.station,\
                                          channel='*'+slave_pick.channel[-1]):
                        slavetr=slavestream.select(station=slave_pick.station,\
                                               channel='*'+slave_pick.channel[-1])[0]
                    else:
                        print 'No slave data for '+slave_pick.station+'.'+\
                                slave_pick.channel
                        print pick.station+'.'+pick.channel+' '+slave_sfile+' '+master_sfile
                        break
                    # Correct the picks
                    try:
                        correction, cc = xcorrPickCorrection(pick.time, mastertr,\
                                                             slave_pick.time,\
                                                             slavetr,pre_pick,\
                                                             extract_len-pre_pick, shift_len,\
                                                             filter="bandpass",\
                                                             filter_options={'freqmin':lowcut,
                                                                             'freqmax':highcut},plot=False)
                        # Get the differential travel time using the corrected time.

                        dt=(pick.time-master_ori_time)-\
                                (slave_pick.time+correction-slave_ori_time)
                        links+=1
                        if cc*cc >= coh_thresh:
                            phases+=1
                            #added by Caro
                            event_text+=pick.station.ljust(4)+\
                                    _cc_round(correction,3).rjust(11)+\
                                    _cc_round(cc,3).rjust(8)+\
                                    ' '+pick.phase+'\n'
                            # links+=1
                        corr_list.append(cc*cc)
                    except:
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
    plt.hist(corr_list, 150)
    plt.show()
    # f.write('\n')
    f.close()
    return
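
A hedged usage sketch for write_correlations, assuming the function is already in scope; the REA glob pattern and the event numbering are invented for illustration.

import glob

# Build the (event_id, sfile) tuples the function expects; paths are made up.
sfiles = sorted(glob.glob('REA/TEST_/2015/08/*.S*'))
event_list = [(i, sfile) for i, sfile in enumerate(sfiles)]

# Correlate 2 s windows starting 0.1 s before each pick, allow 0.2 s of shift,
# and only write phase pairs with cc^2 >= 0.5.
write_correlations(event_list, wavbase='WAV/TEST_', extract_len=2.0,
                   pre_pick=0.1, shift_len=0.2, lowcut=1.0, highcut=10.0,
                   max_sep=4, min_link=8, coh_thresh=0.5)
# Differential times are written to 'dt.cc' in the working directory.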
Example #4
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,\
               debug=0):
    """
    Function to read in picks from sfile then generate the template from the
    picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the\
    path to a seisan nordic type s-file containing waveform and pick\
    information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in\
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    """
    # Perform some checks first
    import os
    import sys
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import Sfile_util
    # Read in the header of the sfile
    wavefiles=Sfile_util.readwavename(sfile)
    pathparts=sfile.split('/')[0:len(sfile.split('/'))-1]
    wavpath=''
    for part in pathparts:
        if part == 'REA':
            part='WAV'
        wavpath+=part+'/'
    from obspy import read as obsread
    from eqcorrscan.utils import pre_processing
    # Read in waveform file
    for wavefile in wavefiles:
        print "I am going to read waveform data from: "+wavpath+wavefile
        if 'st' in locals():
            st+=obsread(wavpath+wavefile)
        else:
            st=obsread(wavpath+wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print 'Sampling rate of data is lower than sampling rate asked for'
            print 'As this is not good practice for correlations I will not do this'
            raise ValueError("Trace: "+tr.stats.station+" sampling rate: "+\
                             str(tr.stats.sampling_rate))
    # Read in pick info
    picks=Sfile_util.readpicks(sfile)
    print "I have found the following picks"
    for pick in picks:
        print pick.station+' '+pick.channel+' '+pick.phase+' '+str(pick.time)

    # Process waveform data
    st=pre_processing.shortproc(st, lowcut, highcut, filt_order,\
                      samp_rate, debug)
    st1=_template_gen(picks, st, length, swin)
    return st1
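
A short usage sketch; the module path (eqcorrscan.core.template_gen) and the S-file location are assumptions. The returned obspy Stream is the cut template, which can be saved with obspy's standard writer.

from eqcorrscan.core.template_gen import from_sfile  # assumed module location

template = from_sfile(sfile='REA/TEST_/2015/08/15-0101-00L.S201508',
                      lowcut=2.0, highcut=9.0, samp_rate=20.0,
                      filt_order=4, length=3.0, swin='all', debug=0)
# Save the cut template for later matched-filter detection.
template.write('template.ms', format='MSEED')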
Example #5
def family_calc(template, detections, wavdir, cut=(-0.5, 3.0),\
                freqmin=5.0, freqmax=15.0, corr_thresh=0.9, \
                template_pre_pick=0.1, samp_rate=100.0, plotvar=False,\
                resample=True):
    """
    Function to calculate the magnitudes for a given family, where the template
    is an s-file with a magnitude (and an appropriate waveform in the same
    directory), and the detections is a list of s-files for that template.

    :type template: str
    :param template: path to the template for this family
    :type detections: List of str
    :param detections: List of paths for s-files detected for this family
    :type wavdir: str
    :param wavdir: Path to the detection waveforms
    :type cut: tuple of float
    :param cut: Cut window around P-pick
    :type freqmin: float
    :param freqmin: Low-cut in Hz
    :type freqmax: float
    :param freqmax: High-cut in Hz
    :type corr_thresh: float
    :param corr_thresh: Minimum correlation (with stack) for use in SVD
    :type template_pre_pick: float
    :param template_pre_pick: Pre-pick used for template in seconds
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz
    :type plotvar: bool
    :param plotvar: Whether to plot the traces and their correlation with the
        stack
    :type resample: bool
    :param resample: Whether to detrend and resample the streams to samp_rate

    :returns: list of event headers with magnitudes added, or False if the
        relative magnitudes cannot be computed
    """
    from obspy import read, Stream
    from eqcorrscan.utils import stacking, clustering
    from eqcorrscan.utils import Sfile_util
    from eqcorrscan.core.match_filter import normxcorr2
    import numpy as np
    import matplotlib.pyplot as plt
    from obspy.signal.cross_correlation import xcorr

    # First read in the template and check that is has a magnitude
    template_mag = Sfile_util.readheader(template).Mag_1
    template_magtype = Sfile_util.readheader(template).Mag_1_type
    if template_mag=='nan' or template_magtype != 'L':
        raise IOError('Template does not have a local magnitude, calculate this')

    # Now we need to load all the waveforms and picks
    all_detection_streams=[] # Empty list for all the streams
    all_p_picks=[] # List for all the P-picks
    event_headers=[] # List of event headers which we will return
    for detection in detections:
        event_headers.append(Sfile_util.readheader(detection))
        d_picks=Sfile_util.readpicks(detection)
        try:
            d_stream=read(wavdir+'/'+Sfile_util.readwavename(detection)[0])
        except IOError:
            # Allow for seisan year/month directories
            d_stream=read(wavdir+'/????/??/'+Sfile_util.readwavename(detection)[0])
        except:
            raise IOError('Cannot read waveform')
        # Resample the stream
        if resample:
            d_stream = d_stream.detrend('linear')
            d_stream = d_stream.resample(samp_rate)
        # We only want channels with a p-pick, these should be vertical channels
        picked=[]
        p_picks= []
        for pick in d_picks:
            pick.time-=template_pre_pick
            print pick.time
            if pick.phase[-1]=='P':
                p_picks.append(pick)
                tr=d_stream.select(station=pick.station,\
                                   channel='??'+pick.channel[-1])
                print tr
                if len(tr) >= 1:
                    tr=tr[0]
                else:
                    print 'No channel for pick'
                    print pick
                    break
                # Filter the trace
                tr=tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
                # Trim the trace around the P-pick
                tr.trim(pick.time+cut[0]-0.05, pick.time+cut[1]+0.5)
                picked.append(tr)
        picked=Stream(picked)
        # Add this to the list of streams
        all_detection_streams.append(picked)
        all_p_picks.append(p_picks)
    # Add the template in
    template_stream = read('/'.join(template.split('/')[0:-1])+'/'+\
                           Sfile_util.readwavename(template)[0])
    # Resample
    if resample:
        template_stream = template_stream.detrend('linear')
        template_stream = template_stream.resample(samp_rate)
    template_picks = Sfile_util.readpicks(template)
    picked=[]
    p_picks=[]
    for pick in template_picks:
        pick.time-=template_pre_pick
        if pick.phase=='P':
            p_picks.append(pick)
            tr=template_stream.select(station=pick.station,\
                                   channel='??'+pick.channel[-1])
            if len(tr) >= 1:
                tr=tr[0]
                # Filter the trace
                tr=tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
                # Trim the trace around the P-pick
                tr.trim(pick.time+cut[0]-0.05, pick.time+cut[1]+0.5)
                picked.append(tr)
            else:
                print 'No channel for pick'
                print pick
    all_detection_streams.append(Stream(picked))
    print ' I have read in '+str(len(all_detection_streams))+' streams of data'
    all_p_picks.append(p_picks)
    # We now have a list of bandpassed, trimmed streams for all P-picked channels
    # Let's align them
    stachans=[tr.stats.station+'.'+tr.stats.channel\
              for st in all_detection_streams for tr in st]
    stachans=list(set(stachans))
    for i in range(len(stachans)):
        chan_traces=[]
        chan_pick_indexes=[] # Need this for next crop
        for j, detection_stream in enumerate(all_detection_streams):
            stachan=stachans[i]
            # If there is a pick/data for this channel then add it to the list
            detection_trace=detection_stream.select(station=stachan.split('.')[0],\
                                                    channel=stachan.split('.')[1])
            if len(detection_trace)==1:
                chan_traces.append(detection_trace[0])
                chan_pick_indexes.append(j)
            elif len(detection_trace) > 1:
                print 'More than one trace for '+stachan
                chan_traces.append(detection_trace[0])
                chan_pick_indexes.append(j)
        # shiftlen=int(0.4 * (cut[1] - cut[0]) * chan_traces[0].stats.sampling_rate)
        # shiftlen=400
        # shiftlen=200
        shiftlen=10
        shifts, ccs = stacking.align_traces(chan_traces, shiftlen,\
                                       master=chan_traces[-1])
                                       # master=master)
        # Shift by up to 0.5s
        # Amend the picks using the shifts
        for j in range(len(shifts)):
            shift=shifts[j]
            pick_index=chan_pick_indexes[j] # Tells me which stream to look at
            for pick in all_p_picks[pick_index]:
                if pick.station==stachan.split('.')[0]:# and\
                   # pick.channel=='*'+stachan.split('.')[1][-1]:
                    pick.time-=shift
                    print 'Shifting '+pick.station+' by '+str(shift)+\
                            ' for correlation at '+str(ccs[j])
    # We now have amended picks, now we need to re-trim to complete the alignment
    for i in range(len(all_detection_streams)):
        for j in range(len(all_detection_streams[i])):
            all_detection_streams[i][j].trim(all_p_picks[i][j].time+cut[0], \
                                             all_p_picks[i][j].time+cut[1], \
                                             pad=True, fill_value=0,\
                                             nearest_sample=True)
    # Do a real small-scale adjustment, the stack will be better now
    # for i in range(len(stachans)):
        # chan_traces=[]
        # chan_pick_indexes=[] # Need this for next crop
        # for j, detection_stream in enumerate(all_detection_streams):
            # stachan=stachans[i]
            # # If there is a pick/data for this channel then add it to the list
            # detection_trace=detection_stream.select(station=stachan.split('.')[0],\
                                                    # channel=stachan.split('.')[1])
            # if len(detection_trace)==1:
                # chan_traces.append(detection_trace[0])
                # chan_pick_indexes.append(j)
            # elif len(detection_trace) > 1:
                # print 'More than one trace for '+stachan
                # chan_traces.append(detection_trace[0])
                # chan_pick_indexes.append(j)
        # master=stacking.linstack([Stream(tr) for tr in chan_traces])[0]
        # shifts, ccs = stacking.align_traces(chan_traces, 10,\
                                       # master=master)
        # # Shift by up to 0.5s
        # # Ammend the picks using the shifts
        # for j in range(len(shifts)):
            # shift=shifts[j]
            # pick_index=chan_pick_indexes[j] # Tells me which stream to look at
            # for pick in all_p_picks[pick_index]:
                # if pick.station==stachan.split('.')[0]:# and\
                   # # pick.channel=='*'+stachan.split('.')[1][-1]:
                    # pick.time-=shift
                    # print 'Shifting '+pick.station+' by '+str(shift)+\
                            # ' for correlation at '+str(ccs[j])
    # # We now have amended picks, now we need to re-trim to complete the alignment
    # for i in range(len(all_detection_streams)):
        # for j in range(len(all_detection_streams[i])):
            # all_detection_streams[i][j].trim(all_p_picks[i][j].time+cut[0], \
                                             # all_p_picks[i][j].time+cut[1], \
                                             # pad=True, fill_value=0,\
                                             # nearest_sample=True)


    #--------------------------------------------------------------------------
    # Now we have completely aligned traces:
    # We need to remove poorly correlated traces before we compute the SVD
    # We also want to record which stachans have channels for which events
    stachan_event_list=[]
    for stachan in stachans:
        chan_traces=[]
        event_list=[]
        final_event_list=[] # List for the final indexes of events for this stachan
        for i in range(len(all_detection_streams)):
            # Extract channel
            st=all_detection_streams[i]
            tr=st.select(station=stachan.split('.')[0],\
                         channel=stachan.split('.')[1])
            if not len(tr) == 0:
                chan_traces.append(tr[0])
                event_list.append(i)
        # enforce fixed length
        for tr in chan_traces:
            tr.data=tr.data[0:int( tr.stats.sampling_rate * \
                                  ( cut[1] - cut[0] ))]
        # Compute the stack and compare to this
        chan_traces=[Stream(tr) for tr in chan_traces]
        # stack=stacking.linstack(chan_traces)
        stack=chan_traces[-1]
        chan_traces=[st[0] for st in chan_traces]
        if plotvar:
            fig, axes = plt.subplots(len(chan_traces)+1, 1, sharex=True,\
                                     figsize=(7, 12))
            axes=axes.ravel()
            axes[0].plot(stack[0].data, 'r', linewidth=1.5)
            axes[0].set_title(chan_traces[0].stats.station+'.'+\
                              chan_traces[0].stats.channel)
            axes[0].set_ylabel('Stack')
        for i, tr in enumerate(chan_traces):
            if plotvar:
                axes[i+1].plot(tr.data, 'k', linewidth=1.5)
            # corr = normxcorr2(tr.data.astype(np.float32),\
                              # stack[0].data.astype(np.float32))
            dummy, corr = xcorr(tr.data.astype(np.float32),\
                                 stack[0].data.astype(np.float32), 1)
            corr=np.array(corr).reshape(1,1)
            if plotvar:
                axes[i+1].set_ylabel(str(round(corr[0][0],2)))
            if corr[0][0] < corr_thresh:
                # Remove the channel
                print str(corr)+' for channel '+tr.stats.station+'.'+\
                        tr.stats.channel+' event '+str(i)
                all_detection_streams[event_list[i]].remove(tr)
            else:
                final_event_list.append(event_list[i])
        if plotvar:
            plt.show()
        # We should require at least three detections per channel used
        # Compute the SVD
        if len(final_event_list) >= 3:
            stachan_event_list.append((stachan, final_event_list))
        else:
            for i in range(len(all_detection_streams)):
                tr=all_detection_streams[i].select(station=stachan.split('.')[0])
                if not len(tr) == 0:
                    all_detection_streams[i].remove(tr[0])
    # Remove empty streams
    filled_streams=[]
    for stream in all_detection_streams:
        if not len(stream) == 0:
            filled_streams.append(stream)
    all_detection_streams = filled_streams
    # Now we have the streams that are highly enough correlated and the list of
    # which events these correspond to
    print len(all_detection_streams)
    print stachan_event_list
    if len(all_detection_streams) > 0 and len(all_detection_streams[0]) > 0:
        V, s, U, out_stachans = clustering.SVD(all_detection_streams)
        # Reorder the event list
        event_list=[]
        event_stachans=[]
        for out_stachan in out_stachans:
            for stachan in stachan_event_list:
                if stachan[0] == out_stachan:
                    event_list.append(stachan[1])
                    event_stachans.append(stachan[0])
                    print len(stachan[1])
        print event_list
        relative_moments, event_list = SVD_moments(U, s, V, event_stachans,\
                                                   event_list)
        print '\n\nRelative moments: '
        print relative_moments
        for stachan in stachan_event_list:
            print stachan
        # Now we have the relative moments for all appropriate events - this should
        # include the template event also, which has a manually determined magnitude
        # Check that we have got the template event
        if not event_list[-1] == len(detections):
            print 'Template not included in relative magnitude, fail'
            print 'Largest event in event_list: '+str(event_list[-1])
            print 'You gave me '+str(len(detections))+' detections'
            return False
        # Convert the template magnitude to seismic moment
        template_moment = local_to_moment(template_mag)
        # Extrapolate from the template moment - relative moment relationship to
        # Get the moment for relative moment = 1.0
        norm_moment = template_moment / relative_moments[-1]
        # Template is the last event in the list
        # Now these are weights which we can multiple the moments by
        moments = relative_moments * norm_moment
        print 'Moments '
        print moments
        # Now convert to Mw
        Mw = [2.0/3.0 * (np.log10(M) - 9.0 ) for M in moments]
        print 'Moment magnitudes: '
        print Mw
        # Convert to local
        Ml = [ 0.88 * M + 0.73 for M in Mw ]
        print 'Local magnitudes: '
        print Ml
        print 'Template_magnitude: '
        print template_mag
        i=0
        for event_id in event_list[0:-1]:
            print event_id
            print Ml[i]
            event_headers[event_id].Mag_2=Ml[i]
            event_headers[event_id].Mag_2_type='S'
            i+=1
        return event_headers
    else:
        print 'No useful channels'
        print all_detection_streams
        return False
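
The magnitude arithmetic at the end of family_calc can be restated in isolation. The numbers below are invented, and moment_from_local is a hypothetical stand-in for the local_to_moment helper used above (here simply inverting the Mw relation in the code, i.e. treating ML as roughly Mw); only the Mw and ML conversions are copied from the function itself.

import numpy as np

def moment_from_local(ml):
    # Hypothetical ML -> seismic moment (N m) conversion, NOT the library's
    # local_to_moment: it just inverts the Mw relation used below.
    return 10.0 ** (1.5 * ml + 9.0)

template_mag = 2.1                                  # assumed template ML
relative_moments = np.array([0.2, 0.5, 1.3, 1.0])   # last entry is the template
template_moment = moment_from_local(template_mag)
norm_moment = template_moment / relative_moments[-1]  # moment at rel. moment 1.0
moments = relative_moments * norm_moment
Mw = [2.0 / 3.0 * (np.log10(M) - 9.0) for M in moments]  # as in family_calc
Ml = [0.88 * M + 0.73 for M in Mw]                       # Mw -> ML, as above
print(Mw)
print(Ml)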
Example #6
def from_sfile(sfile,
               lowcut,
               highcut,
               samp_rate,
               filt_order,
               length,
               swin,
               debug=0,
               plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the \
        path to a seisan nordic type s-file containing waveform and pick \
        information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import Sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = Sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    new_path_parts = []
    for part in pathparts:
        if part == 'REA':
            part = 'WAV'
        new_path_parts.append(part)
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*new_path_parts) + '/'
    # In case of absolute paths (not handled with .split() --> .join())
    if sfile[0] == '/':
        wavpath = '/' + wavpath
    # Read in waveform file
    for wavefile in wavefiles:
        print(''.join(
            ["I am going to read waveform data from: ", wavpath, wavefile]))
        if 'st' not in locals():
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked ' +
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    catalog = Sfile_util.readpicks(sfile)
    # Read the list of Picks for this event
    picks = catalog[0].picks
    print("I have found the following picks")
    for pick in picks:
        print(' '.join([
            pick.waveform_id.station_code, pick.waveform_id.channel_code,
            pick.phase_hint,
            str(pick.time)
        ]))

    # Process waveform data
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order, samp_rate,
                                  debug)
    st1 = _template_gen(picks, st, length, swin, plot=plot)
    return st1
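
The REA-to-WAV path handling near the top of this example is easy to test in isolation; the sketch below applies the same steps to an invented absolute S-file path.

import os

sfile = '/seismo/REA/TEST_/2015/08/15-0101-00L.S201508'  # invented path
pathparts = sfile.split('/')[0:-1]
new_path_parts = []
for part in pathparts:
    if part == 'REA':
        part = 'WAV'  # waveforms sit under WAV in a tree mirroring REA
    new_path_parts.append(part)
# * unpacks the list so os.path.join receives separate arguments
wavpath = os.path.join(*new_path_parts) + '/'
# os.path.join drops the leading empty string of an absolute path, so the
# function above restores the root slash explicitly
if sfile[0] == '/':
    wavpath = '/' + wavpath
print(wavpath)  # -> /seismo/WAV/TEST_/2015/08/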
Example #7
def from_sfile(sfile,
               lowcut,
               highcut,
               samp_rate,
               filt_order,
               length,
               swin,
               debug=0):
    r"""Function to read in picks from sfile then generate the template from the
    picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the\
    path to a seisan nordic type s-file containing waveform and pick\
    information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in\
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import Sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = Sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    # Waveforms are stored under WAV in a directory tree mirroring REA
    new_path_parts = [part if part != 'REA' else 'WAV' for part in pathparts]
    # Re-join with '/' (this also preserves the root of absolute paths)
    wavpath = '/'.join(new_path_parts)
    if wavpath:
        wavpath += '/'
    # Read in waveform file
    for wavefile in wavefiles:
        print ''.join(
            ["I am going to read waveform data from: ", wavpath, wavefile])
        if 'st' not in locals():
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print 'Sampling rate of data is lower than sampling rate asked for'
            print 'Not good practice for correlations: I will not do this'
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    picks = Sfile_util.readpicks(sfile)
    print "I have found the following picks"
    for pick in picks:
        print ' '.join(
            [pick.station, pick.channel, pick.phase,
             str(pick.time)])

    # Process waveform data
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order, samp_rate,
                                  debug)
    st1 = _template_gen(picks, st, length, swin)
    return st1