Example No. 1
def volume_plot(stationpath, database, limits):
    """
    Function to read in station information from a file and earthquake info
    from sfiles.

    :type stationpath: str
    :type database: str
    :type limits: list
    """
    from eqcorrscan.utils import Sfile_util
    import glob
    sfiles = glob.glob(database + '/*/*/*')
    eqlocs = []
    for sfile in sfiles:
        try:
            header = Sfile_util.readheader(sfile)  # read the header once per s-file
            eqlocs += [(header.latitude, header.longitude, header.depth)]
        except Exception:
            continue
    stalocs = []
    with open(stationpath, 'r') as f:
        for line in f:
            # Station file columns: lon, lat, ..., elevation (m -> km)
            parts = line.split(',')
            stalocs += [(float(parts[1]), float(parts[0]),
                         float(parts[4]) / 1000)]
    from eqcorrscan.utils import EQcorrscan_plotting
    EQcorrscan_plotting.threeD_seismplot(stalocs, eqlocs, limits)
    return
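
A minimal usage sketch (the paths are hypothetical, and the ordering of limits is an assumption - the exact format is whatever EQcorrscan_plotting.threeD_seismplot expects):

# Hypothetical inputs: a CSV station file (lon, lat, ..., elevation in metres,
# matching the parsing above) and a SEISAN REA database root.
stationpath = 'stations.csv'
database = 'REA/TEST_'
limits = [166.0, 168.0, -45.0, -43.0, 0.0, 50.0]  # assumed plot bounds
volume_plot(stationpath, database, limits)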
Example No. 2
def volume_plot(stationpath, database, limits):
    """
    Function to read in station information from a file and earthquake info
    from sfiles.

    :type stationpath: str
    :type database: str
    :type limits: list
    """
    from eqcorrscan.utils import Sfile_util
    import glob

    sfiles = glob.glob(database + "/*/*/*")
    eqlocs = []
    for sfile in sfiles:
        try:
            header = Sfile_util.readheader(sfile)  # read the header once per s-file
            eqlocs += [(header.latitude, header.longitude, header.depth)]
        except Exception:
            continue
    stalocs = []
    with open(stationpath, "r") as f:
        for line in f:
            # Station file columns: lon, lat, ..., elevation (m -> km)
            parts = line.split(",")
            stalocs += [(float(parts[1]), float(parts[0]), float(parts[4]) / 1000)]
    from eqcorrscan.utils import EQcorrscan_plotting

    EQcorrscan_plotting.threeD_seismplot(stalocs, eqlocs, limits)
    return
Example No. 3
def write_event(sfile_list):
    """
    Function to write out an event.dat file of the events

    :type sfile_list: List
    :param sfile_list: List of s-files to sort and put into the database

    :returns: List of tuples of event ID (int) and Sfile name
    """
    from eqcorrscan.utils import Sfile_util
    event_list = []
    # Sort the s-files by origin time before numbering
    sort_list = [(Sfile_util.readheader(sfile).time, sfile) for sfile in sfile_list]
    sort_list.sort(key=lambda tup: tup[0])
    sfile_list = [sfile[1] for sfile in sort_list]
    i = 0
    f = open('event.dat', 'w')
    for sfile in sfile_list:
        i += 1
        event_list.append((i, sfile))
        evinfo = Sfile_util.readheader(sfile)
        f.write(str(evinfo.time.year) + str(evinfo.time.month).zfill(2) +
                str(evinfo.time.day).zfill(2) + '  ' +
                str(evinfo.time.hour).rjust(2) + str(evinfo.time.minute).zfill(2) +
                str(evinfo.time.second).zfill(2) +
                str(evinfo.time.microsecond)[0:2].zfill(2) + '  ' +
                str(evinfo.latitude).ljust(8, '0') + '   ' +
                str(evinfo.longitude).ljust(8, '0') + '  ' +
                str(evinfo.depth).rjust(7).ljust(9, '0') + '   ' +
                str(evinfo.Mag_1) + '    0.00    0.00   ' +
                str(evinfo.t_RMS).ljust(4, '0') +
                str(i).rjust(11) + '\n')
    f.close()
    return event_list
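
A sketch of how write_event might be driven (the database path is hypothetical; write_event assumes Sfile_util is importable at module level):

import glob
from eqcorrscan.utils import Sfile_util  # used inside write_event

sfile_list = glob.glob('REA/TEST_/*/*/*.S*')  # hypothetical REA layout
event_list = write_event(sfile_list)
# event.dat now exists; event_list is [(event_id, sfile), ...] sorted by time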
Example No. 4
def interev_mag_sfiles(sfiles):
    """
    Function to plot interevent-time versus magnitude for series of events.
    Wrapper for interev_mag.

    :type sfiles: List
    :param sfiles: List of sfiles to read from
    """
    from eqcorrscan.utils import Sfile_util
    headers = [Sfile_util.readheader(sfile) for sfile in sfiles]  # read once
    times = [header.time for header in headers]
    mags = [header.Mag_1 for header in headers]
    interev_mag(times, mags)
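
Usage is a one-liner once you have a list of s-files (the path here is hypothetical):

import glob
sfiles = glob.glob('REA/TEST_/*/*/*.S*')  # hypothetical database path
interev_mag_sfiles(sfiles)  # reads time and Mag_1 from each header, then plots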
Example No. 6
def run():
    """
    Where we call all the available tests from
    """
    from eqcorrscan.utils import Sfile_util
    assert test_import()
    assert Sfile_util.test_rw()
Example No. 8
def lag_calc(detections, detect_data, templates, shift_len=0.2, min_cc=0.4):
    """
    Overseer function to take a list of detection objects, cut the data
    around each detection to the length of the template + shift_len on
    either side. This will then write out a SEISAN s-file for the detections
    with pick times based on the lag-times found at the maximum correlation,
    providing that correlation is above the min_cc.

    :type detections: List of DETECTION
    :param detections: List of DETECTION objects
    :type detect_data: obspy.Stream
    :param detect_data: All the data needed to cut from - can be a gappy Stream
    :type templates: List of tuple of String, obspy.Stream
    :param templates: List of the templates used as tuples of template name, template
    :type shift_len: float
    :param shift_len: Shift length allowed for the pick in seconds, will be
                    plus/minus this amount - default=0.2
    :type min_cc: float
    :param min_cc: Minimum cross-correlation value to be considered a pick,
                    default=0.4
    """
    from eqcorrscan.utils import Sfile_util
    from obspy import Stream
    # First work out the delays for each template
    delays = []  # List of tuples of (template name, [(station, channel, delay)])
    for template in templates:
        temp_delays = []
        # Earliest trace start time in this template
        template_start = min(tr.stats.starttime for tr in template[1])
        for tr in template[1]:
            temp_delays.append((tr.stats.station, tr.stats.channel,
                                tr.stats.starttime - template_start))
        delays.append((template[0], temp_delays))
    detect_streams = []
    for detection in detections:
        detect_stream = []
        for tr in detect_data:
            tr_copy = tr.copy()
            template = [t for t in templates if t[0] == detection.template_name][0]
            template = template[1].select(station=tr.stats.station,
                                          channel=tr.stats.channel)
            if template:
                # Convert template length from samples to seconds
                template_len = len(template[0]) / template[0].stats.sampling_rate
            else:
                continue  # If there is no template-data match then skip the rest
                          # of the trace loop.
            delay = [delay for delay in delays
                     if delay[0] == detection.template_name][0]
            delay = [d for d in delay[1] if d[0] == tr.stats.station and
                     d[1] == tr.stats.channel][0][2]
            detect_stream.append(tr_copy.trim(
                starttime=detection.detect_time - shift_len + delay,
                endtime=detection.detect_time + delay + shift_len + template_len))
        detect_streams.append((detection.template_name, Stream(detect_stream)))
        # Tuple of template name and data stream
    # Segregate detections by template
    lags = []
    for template in templates:
        template_detections = [detect[1] for detect in detect_streams
                               if detect[0] == template[0]]
        lags.append(day_loop(template_detections, template[1]))

    # Write out the lags!
    for event in lags:
        # I think I have an old version of Sfile_util here
        # NOTE: wavefile is not defined in this scope in the original code
        sfilename = Sfile_util.blanksfile(wavefile, 'L', 'PYTH', 'out', True)
        picks = []
        for pick in event:
            picks.append(Sfile_util.PICK())
        Sfile_util.populateSfile(sfilename, picks)
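
A sketch of the expected inputs, assuming templates are (name, Stream) tuples as the docstring describes; the file names are hypothetical and the DETECTION list would come from a prior matched-filter run:

from obspy import read

templates = [('template_1', read('template_1.ms'))]  # hypothetical template file
detect_data = read('daylong_data.ms')                # can be a gappy Stream
detections = []  # fill with DETECTION objects from a prior match_filter run
lag_calc(detections, detect_data, templates, shift_len=0.2, min_cc=0.4)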
Example No. 9
def write_catalogue(event_list, max_sep=1, min_link=8):
    """
    Function to write the dt.ct file needed by hypoDD - takes input event list
    from write_event as a list of tuples of event id and sfile.  It will read
    the pick information from the seisan formatted s-file using the Sfile_util
    utilities.

    :type event_list: List of tuple
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired

    :returns: List of stations
    """
    from eqcorrscan.utils.mag_calc import dist_calc
    from eqcorrscan.utils import Sfile_util
    f = open('dt.ct', 'w')
    fphase = open('phase.dat', 'w')
    stations = []
    evcount = 0
    for i in range(len(event_list)):
        master_sfile = event_list[i][1]
        master_event_id = event_list[i][0]
        master_picks = Sfile_util.readpicks(master_sfile)
        master_header = Sfile_util.readheader(master_sfile)
        master_ori_time = master_header.time
        # print('Master origin time: ' + str(master_ori_time))
        master_location = (master_header.latitude, master_header.longitude,
                           master_header.depth)
        header = '#  ' + str(master_ori_time.year)
        fphase.write(header + '\n')
        for pick in master_picks:
            fphase.write(pick.station + '  ' +
                         _cc_round(pick.time - master_ori_time, 3) + '   ' + '\n')
        for j in range(i + 1, len(event_list)):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            slave_event_id = event_list[j][0]
            # Write out the header line
            event_text = '#' + str(master_event_id).rjust(10) + \
                    str(slave_event_id).rjust(10) + '\n'
            slave_picks = Sfile_util.readpicks(slave_sfile)
            slave_header = Sfile_util.readheader(slave_sfile)
            slave_ori_time = slave_header.time
            slave_location = (slave_header.latitude, slave_header.longitude,
                              slave_header.depth)
            if dist_calc(master_location, slave_location) > max_sep:
                break
            links = 0  # Count the number of linkages
            for pick in master_picks:
                if pick.phase not in ['P', 'S']:
                    continue  # Only use P and S picks, not amplitude or 'other'
                # Find station, phase pairs
                slave_matches = [p for p in slave_picks
                                 if p.station == pick.station and
                                 p.phase == pick.phase]
                # Loop through the matches
                for slave_pick in slave_matches:
                    links += 1
                    event_text += pick.station.ljust(4) + \
                            _cc_round(pick.time - master_ori_time, 3).rjust(11) + \
                            _cc_round(slave_pick.time - slave_ori_time, 3).rjust(8) + \
                            _av_weight(pick.weight, slave_pick.weight).rjust(7) + \
                            ' ' + pick.phase + '\n'
                    stations.append(pick.station)
            if links >= min_link:
                f.write(event_text)
                evcount += 1
    print('You have ' + str(evcount) + ' links')
    # f.write('\n')
    f.close()
    fphase.close()
    return list(set(stations))
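
Chained with write_event (Example No. 3), a run might look like this (paths hypothetical):

import glob

sfile_list = glob.glob('REA/TEST_/*/*/*.S*')  # hypothetical REA layout
event_list = write_event(sfile_list)          # writes event.dat
stations = write_catalogue(event_list, max_sep=1, min_link=8)
print('dt.ct and phase.dat written; stations used:', stations)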
Example No. 10
def write_correlations(event_list, wavbase, extract_len, pre_pick, shift_len,\
                       lowcut=1.0, highcut=10.0, max_sep=4, min_link=8, \
                       coh_thresh=0.0):
    """
    Function to write a dt.cc file for hypoDD input - takes an input list of
    events and computes pick refinements by correlation.

    Note that this is **NOT** fast.

    :type event_list: List of tuple
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: string
    :param wavbase: Path to the seisan wave directory in which the wavefiles
                    named in the S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    """
    from obspy.signal.cross_correlation import xcorrPickCorrection
    import matplotlib.pyplot as plt
    from obspy import read
    from eqcorrscan.utils.mag_calc import dist_calc
    from eqcorrscan.utils import Sfile_util
    import glob
    import os
    corr_list = []
    f = open('dt.cc', 'w')
    for i in range(len(event_list)):
        master_sfile = event_list[i][1]
        master_event_id = event_list[i][0]
        master_picks = Sfile_util.readpicks(master_sfile)
        master_ori_time = Sfile_util.readheader(master_sfile).time
        master_location = (Sfile_util.readheader(master_sfile).latitude,
                           Sfile_util.readheader(master_sfile).longitude,
                           Sfile_util.readheader(master_sfile).depth)
        master_wavefiles = Sfile_util.readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + '????' + os.sep + '??' +
                               os.sep + master_wavefiles[0])
        if masterpath:
            masterstream = read(masterpath[0])
        if len(master_wavefiles) > 1:
            for wavefile in master_wavefiles:
                wavepath = glob.glob(wavbase + os.sep + '*' + os.sep + '*' +
                                     os.sep + wavefile)
                if wavepath:
                    masterstream += read(wavepath[0])
                else:
                    raise IOError("Couldn't find wavefile")
        for j in range(i + 1, len(event_list)):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            slave_event_id = event_list[j][0]
            slave_wavefiles = Sfile_util.readwavename(slave_sfile)
            try:
                slavestream = read(wavbase + '/*/*/' + slave_wavefiles[0])
            except Exception:
                raise IOError('No wavefile found: ' + slave_wavefiles[0] + ' ' +
                              slave_sfile)
            if len(slave_wavefiles) > 1:
                for wavefile in slave_wavefiles:
                    slavestream += read(wavbase + '/*/*/' + wavefile)
            # Write out the header line
            event_text = '#' + str(master_event_id).rjust(10) + \
                    str(slave_event_id).rjust(10) + ' 0.0   \n'
            slave_picks = Sfile_util.readpicks(slave_sfile)
            slave_ori_time = Sfile_util.readheader(slave_sfile).time
            slave_location = (Sfile_util.readheader(slave_sfile).latitude,
                              Sfile_util.readheader(slave_sfile).longitude,
                              Sfile_util.readheader(slave_sfile).depth)
            if dist_calc(master_location, slave_location) > max_sep:
                break
            links = 0
            phases = 0
            for pick in master_picks:
                if pick.phase not in ['P', 'S']:
                    continue  # Only use P and S picks, not amplitude or 'other'
                # Find station, phase pairs
                slave_matches = [p for p in slave_picks
                                 if p.station == pick.station and
                                 p.phase == pick.phase]
                if masterstream.select(station=pick.station,
                                       channel='*' + pick.channel[-1]):
                    mastertr = masterstream.select(
                        station=pick.station, channel='*' + pick.channel[-1])[0]
                else:
                    print('No waveform data for ' + pick.station + '.' +
                          pick.channel)
                    print(pick.station + '.' + pick.channel + ' ' +
                          slave_sfile + ' ' + master_sfile)
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(station=slave_pick.station,
                                          channel='*' + slave_pick.channel[-1]):
                        slavetr = slavestream.select(
                            station=slave_pick.station,
                            channel='*' + slave_pick.channel[-1])[0]
                    else:
                        print('No slave data for ' + slave_pick.station + '.' +
                              slave_pick.channel)
                        print(pick.station + '.' + pick.channel + ' ' +
                              slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc = xcorrPickCorrection(
                            pick.time, mastertr, slave_pick.time, slavetr,
                            pre_pick, extract_len - pre_pick, shift_len,
                            filter="bandpass",
                            filter_options={'freqmin': lowcut,
                                            'freqmax': highcut},
                            plot=False)
                        # Get the differential travel time using the
                        # corrected time.
                        dt = (pick.time - master_ori_time) - \
                                (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc * cc >= coh_thresh:
                            phases += 1
                            # added by Caro
                            event_text += pick.station.ljust(4) + \
                                    _cc_round(correction, 3).rjust(11) + \
                                    _cc_round(cc, 3).rjust(8) + \
                                    ' ' + pick.phase + '\n'
                            # links += 1
                        corr_list.append(cc * cc)
                    except Exception:
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
    plt.hist(corr_list, 150)
    plt.show()
    # f.write('\n')
    f.close()
    return
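
The correlation-based counterpart can then be fed the same write_event output (the WAV path and window parameters here are hypothetical):

write_correlations(event_list, wavbase='WAV/TEST_',  # hypothetical WAV tree
                   extract_len=2.0, pre_pick=0.5, shift_len=0.2,
                   lowcut=1.0, highcut=10.0, max_sep=4, min_link=8)
# writes dt.cc and shows a histogram of squared correlations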
Example No. 11
# Now we find the s-file we want to use to generate a template from
import glob
import os
from obspy import read
from eqcorrscan.utils import Sfile_util

data_directory = os.path.join('test_data', 'tutorial_data')
sfiles = glob.glob(os.path.join(data_directory, '*L.S*'))
print(sfiles)

templates = []
template_names = []
for i, sfile in enumerate(sfiles):
    # Read in the picks from the S-file, note, in the full case one of the main
    # functions in template_gen would be used rather than this, but for
    # the tutorial we will read in the data here - also note that this
    # template generation is inefficient for multiple templates, if using
    # daylong data for multiple templates you would want to only read
    # the seismic data once and cut it multiple times.
    event = Sfile_util.readpicks(sfile)
    for pick in event.picks:
        print(pick)
        if 'wavefiles' not in locals():
            wavefiles = glob.glob(
                os.path.join(data_directory,
                             '.'.join([pick.waveform_id.station_code, '*'])))
        else:
            wavefiles += glob.glob(
                os.path.join(data_directory,
                             '.'.join([pick.waveform_id.station_code, '*'])))
    wavefiles = list(set(wavefiles))
    for wavefile in wavefiles:
        print(' '.join(['Reading data from', wavefile]))
        if 'st' not in locals():
            st = read(wavefile)
Example No. 12
def from_contbase(sfile,
                  contbase_list,
                  lowcut,
                  highcut,
                  samp_rate,
                  filt_order,
                  length,
                  prepick,
                  swin,
                  debug=0,
                  plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefiles from the continuous database of \
    day-long files.  Included is a section to sanity check that the files are \
    daylong and that they start at the start of the day.  You should ensure \
    this is the case otherwise this may alter your data if your data are \
    daylong but the headers are incorrectly set.

    :type sfile: string
    :param sfile: sfilename must be the path to a seisan nordic type s-file \
            containing waveform and pick information, all other arguments can \
            be numbers save for swin which must be either P, S or all \
            (case-sensitive).
    :type contbase_list: List of tuple of string
    :param contbase_list: List of tuples of the form \
        ['path', 'type', 'network'].  Where path is the path to the \
        continuous database, type is the directory structure, which can be \
        either Yyyyy/Rjjj.01, which is the standard IRIS Year, julian day \
        structure, or, yyyymmdd which is a single directory for every day.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type prepick: float
    :param prepick: Pre-pick time in seconds
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type debug: int
    :param debug: Level of debugging output, higher=more
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    # import some things
    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import Sfile_util
    import glob
    from obspy import read as obsread

    # Read in the header of the sfile
    event = Sfile_util.readheader(sfile)
    day = event.origins[0].time

    # Read in pick info
    catalog = Sfile_util.readpicks(sfile)
    picks = catalog[0].picks
    print("I have found the following picks")
    pick_chans = []
    used_picks = []
    for pick in picks:
        station = pick.waveform_id.station_code
        channel = pick.waveform_id.channel_code
        phase = pick.phase_hint
        pcktime = pick.time
        if station + channel not in pick_chans and phase in ['P', 'S']:
            pick_chans.append(station + channel)
            used_picks.append(pick)
            print(pick)
            # #########Left off here
            for contbase in contbase_list:
                if contbase[1] == 'yyyy/mm/dd':
                    # os.path.join takes separate path components, not a list
                    daydir = os.path.join(str(day.year),
                                          str(day.month).zfill(2),
                                          str(day.day).zfill(2))
                elif contbase[1] == 'Yyyyy/Rjjj.01':
                    daydir = os.path.join('Y' + str(day.year),
                                          'R' + str(day.julday).zfill(3) + '.01')
                elif contbase[1] == 'yyyymmdd':
                    daydir = day.datetime.strftime('%Y%m%d')
                if 'wavefiles' not in locals():
                    wavefiles = glob.glob(os.path.join(
                        contbase[0], daydir, '*' + station + '.*'))
                else:
                    wavefiles += glob.glob(os.path.join(
                        contbase[0], daydir, '*' + station + '.*'))
        elif phase in ['P', 'S']:
            print(' '.join(
                ['Duplicate pick', station, channel, phase,
                 str(pcktime)]))
        elif phase == 'IAML':
            print(' '.join(
                ['Amplitude pick', station, channel, phase,
                 str(pcktime)]))
    picks = used_picks
    wavefiles = list(set(wavefiles))

    # Read in waveform file
    wavefiles.sort()
    for wavefile in wavefiles:
        print("I am going to read waveform data from: " + wavefile)
        if 'st' not in locals():
            st = obsread(wavefile)
        else:
            st += obsread(wavefile)
    # Process waveform data
    st.merge(fill_value='interpolate')
    for tr in st:
        tr = pre_processing.dayproc(tr, lowcut, highcut, filt_order, samp_rate,
                                    debug, day)
    # Cut and extract the templates
    st1 = _template_gen(picks, st, length, swin, prepick=prepick, plot=plot)
    return st1
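
A sketch of a call, showing the contbase_list tuple structure the docstring describes (the paths, network code and s-file name are hypothetical):

contbase_list = [('/data/continuous', 'Yyyyy/Rjjj.01', 'NZ')]  # hypothetical
template = from_contbase('REA/TEST_/2009/07/01-0259-35L.S200907',  # hypothetical
                         contbase_list=contbase_list, lowcut=2.0, highcut=10.0,
                         samp_rate=25.0, filt_order=4, length=6.0,
                         prepick=0.1, swin='all')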
Example No. 13
def from_sfile(sfile,
               lowcut,
               highcut,
               samp_rate,
               filt_order,
               length,
               swin,
               debug=0,
               plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the \
        path to a seisan nordic type s-file containing waveform and pick \
        information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import Sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = Sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    new_path_parts = []
    for part in pathparts:
        if part == 'REA':
            part = 'WAV'
        new_path_parts.append(part)
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*new_path_parts) + '/'
    # In case of absolute paths (not handled with .split() --> .join())
    if sfile[0] == '/':
        wavpath = '/' + wavpath
    # Read in waveform file
    for wavefile in wavefiles:
        print(''.join(
            ["I am going to read waveform data from: ", wavpath, wavefile]))
        if 'st' not in locals():
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked ' +
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    catalog = Sfile_util.readpicks(sfile)
    # Read the list of Picks for this event
    picks = catalog[0].picks
    print("I have found the following picks")
    for pick in picks:
        print(' '.join([
            pick.waveform_id.station_code, pick.waveform_id.channel_code,
            pick.phase_hint,
            str(pick.time)
        ]))

    # Process waveform data
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order, samp_rate,
                                  debug)
    st1 = _template_gen(picks, st, length, swin, plot=plot)
    return st1
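
A sketch of a call and saving the result (the s-file path and processing parameters are hypothetical):

template = from_sfile('REA/TEST_/2009/07/01-0259-35L.S200907',  # hypothetical
                      lowcut=2.0, highcut=10.0, samp_rate=25.0,
                      filt_order=4, length=6.0, swin='all')
template.write('template.ms', format='MSEED')  # templates are obspy Streams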
Example No. 14
def lag_calc(detections, detect_data, templates, shift_len=0.2, min_cc=0.4):
    """
    Overseer function to take a list of detection objects, cut the data
    around each detection to the length of the template + shift_len on
    either side. This will then write out a SEISAN s-file for the detections
    with pick times based on the lag-times found at the maximum correlation,
    providing that correlation is above the min_cc.

    :type detections: List of DETECTION
    :param detections: List of DETECTION objects
    :type detect_data: obspy.Stream
    :param detect_data: All the data needed to cut from - can be a gappy Stream
    :type templates: List of tuple of String, obspy.Stream
    :param templates: List of the templates used as tuples of template name, template
    :type shift_len: float
    :param shift_len: Shift length allowed for the pick in seconds, will be
                    plus/minus this amount - default=0.2
    :type min_cc: float
    :param min_cc: Minimum cross-correlation value to be considered a pick,
                    default=0.4
    """
    from eqcorrscan.utils import Sfile_util
    from obspy import Stream
    # First work out the delays for each template
    delays = []  # List of tuples of (template name, [(station, channel, delay)])
    for template in templates:
        temp_delays = []
        # Earliest trace start time in this template
        template_start = min(tr.stats.starttime for tr in template[1])
        for tr in template[1]:
            temp_delays.append((tr.stats.station, tr.stats.channel,
                                tr.stats.starttime - template_start))
        delays.append((template[0], temp_delays))
    detect_streams = []
    for detection in detections:
        detect_stream = []
        for tr in detect_data:
            tr_copy = tr.copy()
            template = [
                t for t in templates if t[0] == detection.template_name
            ][0]
            template = template[1].select(station=tr.stats.station,
                                          channel=tr.stats.channel)
            if template:
                # Convert template length from samples to seconds
                template_len = len(template[0]) / template[0].stats.sampling_rate
            else:
                continue  # If there is no template-data match then skip the rest
                # of the trace loop.
            delay = [
                delay for delay in delays
                if delay[0] == detection.template_name
            ][0]
            delay = [d for d in delay[1] if d[0] == tr.stats.station and
                     d[1] == tr.stats.channel][0][2]
            detect_stream.append(tr_copy.trim(
                starttime=detection.detect_time - shift_len + delay,
                endtime=detection.detect_time + delay + shift_len + template_len))
        detect_streams.append((detection.template_name, Stream(detect_stream)))
        # Tuple of template name and data stream
    # Segregate detections by template
    lags = []
    for template in templates:
        template_detections = [detect[1] for detect in detect_streams
                               if detect[0] == template[0]]
        lags.append(day_loop(template_detections, template[1]))

    # Write out the lags!
    for event in lags:
        # I think I have an old version of Sfile_util here
        # NOTE: wavefile is not defined in this scope in the original code
        sfilename = Sfile_util.blanksfile(wavefile, 'L', 'PYTH', 'out', True)
        picks = []
        for pick in event:
            picks.append(Sfile_util.PICK())
        Sfile_util.populateSfile(sfilename, picks)
Example No. 15
# Now we find the s-file we want to use to generate a template from
import glob
from obspy import read
from eqcorrscan.utils import Sfile_util

sfiles = glob.glob('test_data/tutorial_data/*L.S*')

# Generate the template from these sfiles:
templates = []  # Open a list to be filled - only applies for multiple templates
template_names = []  # List of template names for later ID
i = 0  # Template name iterator
for sfile in sfiles:
    # Read in the picks from the S-file, note, in the full case one of the main
    # functions in template_gen would be used rather than this, but for
    # the tutorial we will read in the data here - also note that this
    # template generation is inefficient for multiple templates, if using
    # daylong data for multiple templates you would want to only read
    # the seismic data once and cut it multiple times.
    picks = Sfile_util.readpicks(sfile)
    for pick in picks:
        if 'wavefiles' not in locals():
            wavefiles = glob.glob('test_data/tutorial_data/' +
                                  pick.station + '.*')
        else:
            wavefiles += glob.glob('test_data/tutorial_data/' +
                                   pick.station + '.*')
    wavefiles = list(set(wavefiles))
    for wavefile in wavefiles:
        print('Reading data from ' + wavefile)
        if 'st' not in locals():
            st = read(wavefile)
        else:
            st += read(wavefile)
    st = st.merge(fill_value='interpolate')
Example No. 16
# Now we find the s-file we want to use to generate a template from
import glob
import os
from obspy import read
from eqcorrscan.utils import Sfile_util

data_directory = os.path.join('test_data', 'tutorial_data')
sfiles = glob.glob(os.path.join(data_directory, '*L.S*'))
print(sfiles)

templates = []
template_names = []
for i, sfile in enumerate(sfiles):
    # Read in the picks from the S-file, note, in the full case one of the main
    # functions in template_gen would be used rather than this, but for
    # the tutorial we will read in the data here - also note that this
    # template generation is inefficient for multiple templates, if using
    # daylong data for multiple templates you would want to only read
    # the seismic data once and cut it multiple times.
    event = Sfile_util.readpicks(sfile)
    for pick in event.picks:
        print(pick)
        if 'wavefiles' not in locals():
            wavefiles = glob.glob(os.path.join(
                data_directory, '.'.join([pick.waveform_id.station_code, '*'])))
        else:
            wavefiles += glob.glob(os.path.join(
                data_directory, '.'.join([pick.waveform_id.station_code, '*'])))
    wavefiles = list(set(wavefiles))
    for wavefile in wavefiles:
        print(' '.join(['Reading data from', wavefile]))
        if 'st' not in locals():
            st = read(wavefile)
Example No. 17
def family_calc(template, detections, wavdir, cut=(-0.5, 3.0),\
                freqmin=5.0, freqmax=15.0, corr_thresh=0.9, \
                template_pre_pick=0.1, samp_rate=100.0, plotvar=False,\
                resample=True):
    """
    Function to calculate the magnitudes for a given family, where the template
    is an s-file with a magnitude (and an appropriate waveform in the same
    directory), and the detections is a list of s-files for that template.

    :type template: str
    :param template: path to the template for this family
    :type detections: List of str
    :param detections: List of paths for s-files detected for this family
    :type wavdir: str
    :param wavdir: Path to the detection waveforms
    :type cut: tuple of float
    :param cut: Cut window around P-pick
    :type freqmin: float
    :param freqmin: Low-cut in Hz
    :type freqmax: float
    :param freqmax: High-cut in Hz
    :type corr_thresh: float
    :param corr_thresh: Minimum correlation (with stack) for use in SVD
    :type template_pre_pick: float
    :param template_pre_pick: Pre-pick used for template in seconds
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz
    :type plotvar: bool
    :param plotvar: Whether to produce QC plots of traces and correlations
    :type resample: bool
    :param resample: Whether to resample the data to samp_rate

    :returns: np.ndarray of relative magnitudes
    """
    from obspy import read, Stream
    from eqcorrscan.utils import stacking, clustering
    from eqcorrscan.utils import Sfile_util
    from eqcorrscan.core.match_filter import normxcorr2
    import numpy as np
    import matplotlib.pyplot as plt
    from obspy.signal.cross_correlation import xcorr

    # First read in the template and check that is has a magnitude
    template_mag = Sfile_util.readheader(template).Mag_1
    template_magtype = Sfile_util.readheader(template).Mag_1_type
    if template_mag == 'nan' or template_magtype != 'L':
        raise IOError('Template does not have a local magnitude, calculate this')

    # Now we need to load all the waveforms and picks
    all_detection_streams = []  # Empty list for all the streams
    all_p_picks = []  # List for all the P-picks
    event_headers = []  # List of event headers which we will return
    for detection in detections:
        event_headers.append(Sfile_util.readheader(detection))
        d_picks = Sfile_util.readpicks(detection)
        try:
            d_stream = read(wavdir + '/' + Sfile_util.readwavename(detection)[0])
        except IOError:
            # Allow for seisan year/month directories
            d_stream = read(wavdir + '/????/??/' +
                            Sfile_util.readwavename(detection)[0])
        except Exception:
            raise IOError('Cannot read waveform')
        # Resample the stream
        if resample:
            d_stream = d_stream.detrend('linear')
            d_stream = d_stream.resample(samp_rate)
        # We only want channels with a p-pick, these should be vertical channels
        picked = []
        p_picks = []
        for pick in d_picks:
            pick.time -= template_pre_pick
            print(pick.time)
            if pick.phase[-1] == 'P':
                p_picks.append(pick)
                tr = d_stream.select(station=pick.station,
                                     channel='??' + pick.channel[-1])
                print(tr)
                if len(tr) >= 1:
                    tr = tr[0]
                else:
                    print('No channel for pick')
                    print(pick)
                    break
                # Filter the trace
                tr = tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
                # Trim the trace around the P-pick
                tr.trim(pick.time + cut[0] - 0.05, pick.time + cut[1] + 0.5)
                picked.append(tr)
        picked = Stream(picked)
        # Add this to the list of streams
        all_detection_streams.append(picked)
        all_p_picks.append(p_picks)
    # Add the template in
    template_stream = read('/'.join(template.split('/')[0:-1])+'/'+\
                           Sfile_util.readwavename(template)[0])
    # Resample
    if resample:
        template_stream = template_stream.detrend('linear')
        template_stream = template_stream.resample(samp_rate)
    template_picks = Sfile_util.readpicks(template)
    picked = []
    p_picks = []
    for pick in template_picks:
        pick.time -= template_pre_pick
        if pick.phase == 'P':
            p_picks.append(pick)
            tr = template_stream.select(station=pick.station,
                                        channel='??' + pick.channel[-1])
            if len(tr) >= 1:
                tr = tr[0]
                # Filter the trace
                tr = tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
                # Trim the trace around the P-pick
                tr.trim(pick.time + cut[0] - 0.05, pick.time + cut[1] + 0.5)
                picked.append(tr)
            else:
                print('No channel for pick')
                print(pick)
    all_detection_streams.append(Stream(picked))
    print(' I have read in ' + str(len(all_detection_streams)) +
          ' streams of data')
    all_p_picks.append(p_picks)
    # We now have a list of bandpassed, trimmed streams for all P-picked channels
    # Lets align them
    stachans = [tr.stats.station + '.' + tr.stats.channel
                for st in all_detection_streams for tr in st]
    stachans = list(set(stachans))
    for i in range(len(stachans)):
        chan_traces = []
        chan_pick_indexes = []  # Need this for next crop
        for j, detection_stream in enumerate(all_detection_streams):
            stachan = stachans[i]
            # If there is a pick/data for this channel then add it to the list
            detection_trace = detection_stream.select(
                station=stachan.split('.')[0], channel=stachan.split('.')[1])
            if len(detection_trace) == 1:
                chan_traces.append(detection_trace[0])
                chan_pick_indexes.append(j)
            elif len(detection_trace) > 1:
                print('More than one trace for ' + stachan)
                chan_traces.append(detection_trace[0])
                chan_pick_indexes.append(j)
        # shiftlen=int(0.4 * (cut[1] - cut[0]) * chan_traces[0].stats.sampling_rate)
        # shiftlen=400
        # shiftlen=200
        shiftlen = 10
        shifts, ccs = stacking.align_traces(chan_traces, shiftlen,
                                            master=chan_traces[-1])
        # Shift by up to 0.5s
        # Amend the picks using the shifts
        for j in range(len(shifts)):
            shift = shifts[j]
            pick_index = chan_pick_indexes[j]  # Tells me which stream to look at
            for pick in all_p_picks[pick_index]:
                if pick.station == stachan.split('.')[0]:
                    # and pick.channel == '*' + stachan.split('.')[1][-1]:
                    pick.time -= shift
                    print('Shifting ' + pick.station + ' by ' + str(shift) +
                          ' for correlation at ' + str(ccs[j]))
    # We now have amended picks, now we need to re-trim to complete the alignment
    for i in range(len(all_detection_streams)):
        for j in range(len(all_detection_streams[i])):
            all_detection_streams[i][j].trim(all_p_picks[i][j].time + cut[0],
                                             all_p_picks[i][j].time + cut[1],
                                             pad=True, fill_value=0,
                                             nearest_sample=True)
    # Do a real small-scale adjustment, the stack will be better now
    # for i in range(len(stachans)):
        # chan_traces=[]
        # chan_pick_indexes=[] # Need this for next crop
        # for j, detection_stream in enumerate(all_detection_streams):
            # stachan=stachans[i]
            # # If there is a pick/data for this channel then add it to the list
            # detection_trace=detection_stream.select(station=stachan.split('.')[0],\
                                                    # channel=stachan.split('.')[1])
            # if len(detection_trace)==1:
                # chan_traces.append(detection_trace[0])
                # chan_pick_indexes.append(j)
            # elif len(detection_trace) > 1:
                # print 'More than one trace for '+stachan
                # chan_traces.append(detection_trace[0])
                # chan_pick_indexes.append(j)
        # master=stacking.linstack([Stream(tr) for tr in chan_traces])[0]
        # shifts, ccs = stacking.align_traces(chan_traces, 10,\
                                       # master=master)
        # # Shift by up to 0.5s
        # # Ammend the picks using the shifts
        # for j in range(len(shifts)):
            # shift=shifts[j]
            # pick_index=chan_pick_indexes[j] # Tells me which stream to look at
            # for pick in all_p_picks[pick_index]:
                # if pick.station==stachan.split('.')[0]:# and\
                   # # pick.channel=='*'+stachan.split('.')[1][-1]:
                    # pick.time-=shift
                    # print 'Shifting '+pick.station+' by '+str(shift)+\
                            # ' for correlation at '+str(ccs[j])
    # # We now have amended picks, now we need to re-trim to complete the alignment
    # for i in range(len(all_detection_streams)):
        # for j in range(len(all_detection_streams[i])):
            # all_detection_streams[i][j].trim(all_p_picks[i][j].time+cut[0], \
                                             # all_p_picks[i][j].time+cut[1], \
                                             # pad=True, fill_value=0,\
                                             # nearest_sample=True)


    #--------------------------------------------------------------------------
    # Now we have completely aligned traces:
    # We need to remove poorly correlated traces before we compute the SVD
    # We also want to record which stachans have channels for which events
    stachan_event_list = []
    for stachan in stachans:
        chan_traces = []
        event_list = []
        final_event_list = []  # List for the final indexes of events for this stachan
        for i in range(len(all_detection_streams)):
            # Extract channel
            st = all_detection_streams[i]
            tr = st.select(station=stachan.split('.')[0],
                           channel=stachan.split('.')[1])
            if not len(tr) == 0:
                chan_traces.append(tr[0])
                event_list.append(i)
        # enforce fixed length
        for tr in chan_traces:
            tr.data = tr.data[0:int(tr.stats.sampling_rate * (cut[1] - cut[0]))]
        # Compute the stack and compare to this
        chan_traces = [Stream(tr) for tr in chan_traces]
        # stack = stacking.linstack(chan_traces)
        stack = chan_traces[-1]
        chan_traces = [st[0] for st in chan_traces]
        if plotvar:
            fig, axes = plt.subplots(len(chan_traces) + 1, 1, sharex=True,
                                     figsize=(7, 12))
            axes = axes.ravel()
            axes[0].plot(stack[0].data, 'r', linewidth=1.5)
            axes[0].set_title(chan_traces[0].stats.station + '.' +
                              chan_traces[0].stats.channel)
            axes[0].set_ylabel('Stack')
        for i, tr in enumerate(chan_traces):
            if plotvar:
                axes[i + 1].plot(tr.data, 'k', linewidth=1.5)
            # corr = normxcorr2(tr.data.astype(np.float32),
            #                   stack[0].data.astype(np.float32))
            dummy, corr = xcorr(tr.data.astype(np.float32),
                                stack[0].data.astype(np.float32), 1)
            corr = np.array(corr).reshape(1, 1)
            if plotvar:
                axes[i + 1].set_ylabel(str(round(corr[0][0], 2)))
            if corr[0][0] < corr_thresh:
                # Remove the channel
                print(str(corr) + ' for channel ' + tr.stats.station + '.' +
                      tr.stats.channel + ' event ' + str(i))
                all_detection_streams[event_list[i]].remove(tr)
            else:
                final_event_list.append(event_list[i])
        if plotvar:
            plt.show()
        # We should require at-least three detections per channel used
        # Compute the SVD
        if len(final_event_list) >= 3:
            stachan_event_list.append((stachan, final_event_list))
        else:
            for i in range(len(all_detection_streams)):
                tr = all_detection_streams[i].select(station=stachan.split('.')[0])
                if not len(tr) == 0:
                    all_detection_streams[i].remove(tr[0])
    # Remove empty streams
    filled_streams = []
    for stream in all_detection_streams:
        if not len(stream) == 0:
            filled_streams.append(stream)
    all_detection_streams = filled_streams
    # Now we have the streams that are highly enough correlated and the list of
    # which events these correspond to
    print(len(all_detection_streams))
    print(stachan_event_list)
    if len(all_detection_streams) > 0 and len(all_detection_streams[0]) > 0:
        V, s, U, out_stachans = clustering.SVD(all_detection_streams)
        # Reorder the event list
        event_list = []
        event_stachans = []
        for out_stachan in out_stachans:
            for stachan in stachan_event_list:
                if stachan[0] == out_stachan:
                    event_list.append(stachan[1])
                    event_stachans.append(stachan[0])
                    print(len(stachan[1]))
        print(event_list)
        relative_moments, event_list = SVD_moments(U, s, V, event_stachans,
                                                   event_list)
        print('\n\nRelative moments: ')
        print(relative_moments)
        for stachan in stachan_event_list:
            print(stachan)
        # Now we have the relative moments for all appropriate events - this should
        # include the template event also, which has a manually determined magnitude
        # Check that we have got the template event
        if not event_list[-1] == len(detections):
            print('Template not included in relative magnitude, fail')
            print('Largest event in event_list: ' + str(event_list[-1]))
            print('You gave me ' + str(len(detections)) + ' detections')
            return False
        # Convert the template magnitude to seismic moment
        template_moment = local_to_moment(template_mag)
        # Extrapolate from the template moment - relative moment relationship to
        # Get the moment for relative moment = 1.0
        norm_moment = template_moment / relative_moments[-1]
        # Template is the last event in the list
        # Now these are weights which we can multiple the moments by
        moments = relative_moments * norm_moment
        print('Moments ')
        print(moments)
        # Now convert to Mw
        Mw = [2.0 / 3.0 * (np.log10(M) - 9.0) for M in moments]
        print('Moment magnitudes: ')
        print(Mw)
        # Convert to local
        Ml = [0.88 * M + 0.73 for M in Mw]
        print('Local magnitudes: ')
        print(Ml)
        print('Template_magnitude: ')
        print(template_mag)
        i = 0
        for event_id in event_list[0:-1]:
            print(event_id)
            print(Ml[i])
            event_headers[event_id].Mag_2 = Ml[i]
            event_headers[event_id].Mag_2_type = 'S'
            i += 1
        return event_headers
    else:
        print('No useful channels')
        print(all_detection_streams)
        return False
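
A sketch of a call for one family (all paths hypothetical); on success the returned headers carry the new local magnitudes in Mag_2:

import glob

template = 'REA/TEST_/2009/07/01-0259-35L.S200907'  # hypothetical template s-file
detections = glob.glob('REA/DETECTIONS/*/*/*.S*')   # hypothetical detection s-files
headers = family_calc(template, detections, wavdir='WAV/TEST_',
                      corr_thresh=0.9, samp_rate=100.0)
if headers:
    for header in headers:
        print(header.Mag_2)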
Example No. 18
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,\
               debug=0):
    """
    Function to read in picks from sfile then generate the template from the
    picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the\
    path to a seisan nordic type s-file containing waveform and pick\
    information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in\
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    """
    # Perform some checks first
    import os
    import sys
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import Sfile_util
    # Read in the header of the sfile
    wavefiles = Sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    wavpath = ''
    for part in pathparts:
        if part == 'REA':
            part = 'WAV'
        wavpath += part + '/'
    from obspy import read as obsread
    from eqcorrscan.utils import pre_processing
    # Read in waveform file
    for wavefile in wavefiles:
        print("I am going to read waveform data from: " + wavpath + wavefile)
        if 'st' in locals():
            st += obsread(wavpath + wavefile)
        else:
            st = obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked for')
            print('As this is not good practice for correlations I will not '
                  'do this')
            raise ValueError("Trace: " + tr.stats.station + " sampling rate: " +
                             str(tr.stats.sampling_rate))
    # Read in pick info
    picks = Sfile_util.readpicks(sfile)
    print("I have found the following picks")
    for pick in picks:
        print(pick.station + ' ' + pick.channel + ' ' + pick.phase + ' ' +
              str(pick.time))

    # Process waveform data
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order,
                                  samp_rate, debug)
    st1 = _template_gen(picks, st, length, swin)
    return st1
Example No. 19
def from_contbase(sfile, contbase_list, lowcut, highcut, samp_rate, filt_order,\
                 length, prepick, swin, debug=0):
    """
    Function to read in picks from sfile then generate the template from the
    picks within this and the wavefiles from the continuous database of day-long
    files.  Included is a section to sanity check that the files are daylong and
    that they start at the start of the day.  You should ensure this is the case
    otherwise this may alter your data if your data are daylong but the headers
    are incorrectly set.

    :type sfile: string
    :param sfile: sfilename must be the path to a seisan nordic type s-file \
            containing waveform and pick information, all other arguments can \
            be numbers save for swin which must be either P, S or all \
            (case-sensitive).
    :type contbase_list: List of tuple of string
    :param contbase_list: List of tuples of the form ['path', 'type', 'network']\
                    Where path is the path to the continuous database, type is\
                    the directory structure, which can be either Yyyyy/Rjjj.01,\
                    which is the standard IRIS Year, julian day structure, or,\
                    yyyymmdd which is a single directory for every day.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in\
            template defaults file
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type prepick: float
    :param prepick: Pre-pick time in seconds
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type debug: int
    :param debug: Level of debugging output, higher=more
    """
    # Perform some checks first
    import os, sys
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    # import some things
    from eqcorrscan.utils import Sfile_util
    from eqcorrscan.utils import pre_processing
    import glob
    from obspy import UTCDateTime

    # Read in the header of the sfile
    header = Sfile_util.readheader(sfile)
    day = UTCDateTime(str(header.time.year) + '-' +
                      str(header.time.month).zfill(2) + '-' +
                      str(header.time.day).zfill(2))

    # Read in pick info
    picks = Sfile_util.readpicks(sfile)
    print("I have found the following picks")
    pick_chans = []
    used_picks = []
    for pick in picks:
        if pick.station + pick.channel not in pick_chans and \
           pick.phase in ['P', 'S']:
            pick_chans.append(pick.station + pick.channel)
            used_picks.append(pick)
            print(pick)
            for contbase in contbase_list:
                if contbase[1] == 'yyyy/mm/dd':
                    daydir = str(day.year) + '/' + str(day.month).zfill(2) + \
                            '/' + str(day.day).zfill(2)
                elif contbase[1] == 'Yyyyy/Rjjj.01':
                    daydir = 'Y' + str(day.year) + '/R' + \
                            str(day.julday).zfill(3) + '.01'
                elif contbase[1] == 'yyyymmdd':
                    daydir = str(day.year) + str(day.month).zfill(2) + \
                            str(day.day).zfill(2)
                if 'wavefiles' in locals():
                    wavefiles += glob.glob(contbase[0] + '/' + daydir + '/*' +
                                           pick.station + '.*')
                else:
                    wavefiles = glob.glob(contbase[0] + '/' + daydir + '/*' +
                                          pick.station + '.*')
        elif pick.phase in ['P', 'S']:
            print('Duplicate pick ' + pick.station + ' ' + pick.channel + ' ' +
                  pick.phase + ' ' + str(pick.time))
        elif pick.phase == 'IAML':
            print('Amplitude pick ' + pick.station + ' ' + pick.channel + ' ' +
                  pick.phase + ' ' + str(pick.time))
    picks = used_picks
    wavefiles = list(set(wavefiles))

    # Read in waveform file
    from obspy import read as obsread
    wavefiles.sort()
    for wavefile in wavefiles:
        print "I am going to read waveform data from: "+wavefile
        if 'st' in locals():
            st+=obsread(wavefile)
        else:
            st=obsread(wavefile)
    # Process waveform data
    st.merge(fill_value='interpolate')
    for i, tr in enumerate(st):
        # dayproc returns the processed trace; re-assign into the stream so
        # the processing is not discarded
        st[i] = pre_processing.dayproc(tr, lowcut, highcut, filt_order,\
                                       samp_rate, debug, day)
    # Cut and extract the templates
    st1=_template_gen(picks, st, length, swin, prepick=prepick)
    return st1
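The three directory-structure strings handled above map to day directories as follows; a minimal standalone sketch of that mapping (daydir_for is an illustrative helper, not part of EQcorrscan):

from obspy import UTCDateTime

def daydir_for(contbase_type, day):
    # Mirror the daydir construction used in the function above
    if contbase_type == 'yyyy/mm/dd':
        return str(day.year)+'/'+str(day.month).zfill(2)+'/'+str(day.day).zfill(2)
    elif contbase_type == 'Yyyyy/Rjjj.01':
        return 'Y'+str(day.year)+'/R'+str(day.julday).zfill(3)+'.01'
    elif contbase_type == 'yyyymmdd':
        return str(day.year)+str(day.month).zfill(2)+str(day.day).zfill(2)
    raise ValueError('Unknown contbase type: '+contbase_type)

print daydir_for('Yyyyy/Rjjj.01', UTCDateTime(2015, 3, 14))  # Y2015/R073.01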
Exemplo n.º 20
0
def Amp_pick_sfile(sfile, datapath, respdir, chans=['Z'], var_wintype=True, \
                   winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,\
                   highcut=20.0, corners=4):
    """
    Function to read information from a SEISAN s-file, load the data and the
    picks, cut the data for the channels given around the S-window, simulate
    a Wood Anderson seismometer, then pick the maximum peak-to-trough
    amplitude.

    Output will be put into a mag_calc.out file which will be in full S-file
    format and can be copied to a REA database.

    :type sfile: String
    :type datapath: String
    :param datapath: Path to the waveform files - usually the path to the WAV directory
    :type respdir: String
    :param respdir: Path to the response information directory
    :type chans: List of strings
    :param chans: List of the channels to pick on, defaults to ['Z'] - should
                just be the orientations, e.g. Z,1,2,N,E
    :type var_wintype: Bool
    :param var_wintype: If True, the winlen will be
                    multiplied by the P-S time if both P and S picks are
                    available, otherwise it will be multiplied by the hypocentral
                    distance*0.34 - derived using a p-s ratio of 1.68 and
                    S-velocity of 1.5km/s to give a large window, defaults to True
    :type winlen: Float
    :param winlen: Length of window, see above parameter; if var_wintype is False
                    then this will be in seconds, otherwise it is the multiplier
                    to the p-s time, defaults to 0.9
    :type pre_pick: Float
    :param pre_pick: Time before the s-pick to start the cut window, defaults
                    to 0.2
    :type pre_filt: Bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: Float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: Float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: Int
    :param corners: Number of corners to use in the pre-filter
    """
    # Hardwire a p-s multiplier of hypocentral distance based on p-s ratio of
    # 1.68 and an S-velocity of 1.5km/s, deliberately chosen to be quite slow
    ps_multiplier=0.34
    from eqcorrscan.utils import Sfile_util
    from obspy import read
    from scipy.signal import iirfilter
    from obspy.signal.invsim import paz2AmpValueOfFreqResp
    import warnings
    # First we need to work out what stations have what picks
    picks=Sfile_util.readpicks(sfile)
    # Convert these picks into a lists
    stations=[] # List of stations
    channels=[] # List of channels
    picktimes=[] # List of pick times
    picktypes=[] # List of pick types
    distances=[] # List of hypocentral distances
    picks_out=[]
    for pick in picks:
        if pick.phase in ['P','S']:
            picks_out.append(pick) # Need to be able to remove this if there
                                   # isn't data for a station!
            stations.append(pick.station)
            channels.append(pick.channel)
            picktimes.append(pick.time)
            picktypes.append(pick.phase)
            distances.append(pick.distance)
    # Read in waveforms
    stream=read(datapath+'/'+Sfile_util.readwavename(sfile)[0])
    if len(Sfile_util.readwavename(sfile)) > 1:
        # Skip the first wavefile, which has already been read in
        for wavfile in Sfile_util.readwavename(sfile)[1:]:
            stream+=read(datapath+'/'+wavfile)
    stream.merge() # merge the data, just in case!
    # For each station cut the window
    uniq_stas=list(set(stations))
    for sta in uniq_stas:
        for chan in chans:
            print 'Working on '+sta+' '+chan
            tr=stream.select(station=sta, channel='*'+chan)
            if not tr:
                # Remove picks from file
                # picks_out=[picks_out[i] for i in xrange(len(picks))
                #            if picks_out[i].station+picks_out[i].channel !=
                #            sta+chan]
                warnings.warn('There is no station and channel match in the wavefile!')
                break
            else:
                tr=tr[0]
            # Apply the pre-filter
            if pre_filt:
                try:
                    tr.detrend('simple')
                except:
                    dummy=tr.split()
                    dummy.detrend('simple')
                    tr=dummy.merge()[0]
                tr.filter('bandpass',freqmin=lowcut, freqmax=highcut,\
                             corners=corners)
            sta_picks=[i for i in xrange(len(stations)) \
                           if stations[i]==sta]
            # Index into picks_out, which is aligned with the stations list
            hypo_dist=picks_out[sta_picks[0]].distance
            CAZ=picks_out[sta_picks[0]].CAZ
            if var_wintype:
                if 'S' in [picktypes[i] for i in sta_picks] and\
                   'P' in [picktypes[i] for i in sta_picks]:
                    # If there is an S-pick we can use this :D
                    S_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='S']
                    S_pick=min(S_pick)
                    P_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='P']
                    P_pick=min(P_pick)
                    try:
                        tr.trim(starttime=S_pick-pre_pick,
                                endtime=S_pick+(S_pick-P_pick)*winlen)
                    except:
                        break
                elif 'S' in [picktypes[i] for i in sta_picks]:
                    S_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='S']
                    S_pick=min(S_pick)
                    P_modelled=S_pick-hypo_dist*ps_multiplier
                    try:
                        tr.trim(starttime=S_pick-pre_pick,
                                endtime=S_pick+(S_pick-P_modelled)*winlen)
                    except:
                        break
                else:
                    # In this case we only have a P pick
                    P_pick=[picktimes[i] for i in sta_picks \
                            if picktypes[i]=='P']
                    P_pick=min(P_pick)
                    S_modelled=P_pick+hypo_dist*ps_multiplier
                    try:
                        tr.trim(starttime=S_modelled-pre_pick,
                                endtime=S_modelled+(S_modelled-P_pick)*winlen)
                    except:
                        break
                # Work out the window length based on p-s time or distance
            elif 'S' in [picktypes[i] for i in sta_picks]:
                # If the window is fixed we still need to find the start time,
                # which can be based either on the S-pick (this elif), or
                # on the hypocentral distance and the P-pick

                # Take the minimum S-pick time if more than one S-pick is available
                S_pick=[picktimes[i] for i in sta_picks \
                           if picktypes[i]=='S']
                S_pick=min(S_pick)
                try:
                    tr.trim(starttime=S_pick-pre_pick, endtime=S_pick+winlen)
                except:
                    break
            else:
                # In this case, there is no S-pick and the window length is fixed
                # We need to calculate an expected S_pick based on the hypocentral
                # distance, this will be quite hand-wavey as we are not using
                # any kind of velocity model.
                P_pick=[picktimes[i] for i in sta_picks \
                           if picktypes[i]=='P']
                P_pick=min(P_pick)
                hypo_dist=[distances[i] for i in sta_picks\
                           if picktypes[i]=='P'][0]
                S_modelled=P_pick+hypo_dist*ps_multiplier
                try:
                    tr.trim(starttime=S_modelled-pre_pick,
                            endtime=S_modelled+winlen)
                except:
                    break
            # Find the response information
            resp_info=_find_resp(tr.stats.station, tr.stats.channel,\
                           tr.stats.network, tr.stats.starttime, tr.stats.delta,\
                                 respdir)
            PAZ=[]
            seedresp=[]
            if resp_info and 'gain' in resp_info:
                PAZ=resp_info
            elif resp_info:
                seedresp=resp_info
            # Simulate a Wood Anderson Seismograph
            if PAZ and len(tr.data) > 10: # Set ten data points to be the minimum to pass
                tr=_sim_WA(tr, PAZ, None, 10)
            elif seedresp and len(tr.data) > 10:
                tr=_sim_WA(tr, None, seedresp, 10)
            elif len(tr.data) > 10:
                warnings.warn('No PAZ for '+tr.stats.station+' '+\
                                 tr.stats.channel+' at time: '+\
                                 str(tr.stats.starttime))
                continue
            if len(tr.data) <= 10:
                # Should remove the P and S picks if len(tr.data)==0
                warnings.warn('No data found for: '+tr.stats.station)
                # print 'No data in miniseed file for '+tr.stats.station+
                #       ' removing picks'
                # picks_out=[picks_out[i] for i in xrange(len(picks_out))
                #            if i not in sta_picks]
                break
            # Get the amplitude
            amplitude, period, delay = _max_p2t(tr.data, tr.stats.delta)
            if amplitude==0.0:
                break
            print 'Amplitude picked: '+str(amplitude)
            # Note, amplitude should be in meters at the moment!
            # Remove the pre-filter response
            if pre_filt:
                # Generate poles and zeros for the filter we used earlier - this
                # is how the filter is designed in the convenience methods of
                # filtering in obspy.
                z, p, k=iirfilter(corners, [lowcut/(0.5*tr.stats.sampling_rate),\
                                            highcut/(0.5*tr.stats.sampling_rate)],\
                                  btype='band', ftype='butter', output='zpk')
                filt_paz={'poles': list(p),
                          'zeros': list(z),
                          'gain': k,
                          'sensitivity':  1.0}
                amplitude /= (paz2AmpValueOfFreqResp(filt_paz, 1/period) * \
                              filt_paz['sensitivity'])
            # Convert amplitude to mm
            if PAZ: # Divide by Gain to get to nm (returns pm? 10^-12)
                # amplitude *=PAZ['gain']
                amplitude /= 1000
            if seedresp: # Seedresp method returns mm
                amplitude *= 1000000
            # Write out the half amplitude, approximately the peak amplitude as
            # used directly in magnitude calculations
            # Page 343 of Seisan manual:
            #   Amplitude (Zero-Peak) in units of nm, nm/s, nm/s^2 or counts
            amplitude *= 0.5
            # Generate a PICK type object for this pick
            picks_out.append(Sfile_util.PICK(station=tr.stats.station,
                                         channel=tr.stats.channel,
                                         impulsivity=' ',
                                         phase='IAML',
                                         weight='', polarity=' ',
                                         time=tr.stats.starttime+delay,
                                         coda=999, amplitude=amplitude,
                                         peri=period, azimuth=float('NaN'),
                                         velocity=float('NaN'), AIN=999, SNR='',
                                         azimuthres=999, timeres=float('NaN'),
                                         finalweight=999, distance=hypo_dist,
                                         CAZ=CAZ))
    # Copy the header from the sfile to a new local S-file
    fin=open(sfile,'r')
    fout=open('mag_calc.out','w')
    for line in fin:
        fout.write(line)
        if line[79] == '7':
            # Stop copying at the end of the header (type-7 line)
            break
    fin.close()
    fout.close()
    # Write picks out to new s-file
    for pick in picks_out:
        print pick
    Sfile_util.populateSfile('mag_calc.out',picks_out)
    return picks_out
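_max_p2t is used above but not shown; as a rough guide, a peak-to-trough picker along these lines would satisfy the interface (returning amplitude, dominant period, and delay from the trace start). This is an illustrative sketch, not the EQcorrscan implementation:

import numpy as np

def max_p2t(data, delta):
    # Indices where the gradient changes sign, i.e. peaks and troughs
    turning = np.where(np.diff(np.sign(np.diff(data))) != 0)[0] + 1
    amplitude, period, delay = 0.0, 0.0, 0.0
    for i in range(1, len(turning)):
        amp = abs(data[turning[i]] - data[turning[i - 1]])
        if amp > amplitude:
            amplitude = amp
            # An adjacent peak-trough pair spans half a period
            period = 2 * (turning[i] - turning[i - 1]) * delta
            delay = turning[i - 1] * delta
    return amplitude, period, delay

data = np.sin(np.linspace(0, 4 * np.pi, 400))
print max_p2t(data, 0.01)  # amplitude ~2, period ~2 s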
Exemplo n.º 21
0
def synth_from_sfile(sfile, samp_rate, length=10.0, PS_ratio=1.68):
    """
    Function to generate a synthetic template for a given s-file

    :type sfile: str
    :param sfile: Path to the s-file
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz for template
    :type length: float
    :param length: Length of templates in seconds, defaults to 10
    :type PS_ratio: float
    :param PS_ratio: S to P travel-time ratio (vp/vs) to use if only one pick\
            is found for a station, defaults to 1.68

    :returns: :class:`obspy.Stream`
    """
    from eqcorrscan.utils import Sfile_util
    from eqcorrscan.utils import synth_seis
    from obspy import Stream, Trace, UTCDateTime
    import warnings
    # Get the picks and the origin time
    picks=Sfile_util.readpicks(sfile)
    ori_time=Sfile_util.readheader(sfile).time.datetime
    # We only want P and S phases
    picks=[p for p in picks if p.phase in ['P','S']]
    # We want a list of the stations that we have picks for
    stations=list(set([p.station for p in picks]))
    # Loop through the stations
    synths=Stream()
    for station in stations:
        # Find the relevant picks
        sta_picks=[p for p in picks if p.station==station]
        if len(sta_picks) == 1:
            msg='Only '+sta_picks[0].phase+' phase picked for station '+\
                station+', will use an S-P ratio of '+str(PS_ratio)
            warnings.warn(msg)
            # Calculate the pick travel time
            tt = (sta_picks[0].time.datetime-ori_time).total_seconds()
            if sta_picks[0].phase == 'P':
                SP_time=(tt*PS_ratio)-tt
                P_pick=sta_picks[0].time.datetime
                S_pick=(UTCDateTime(P_pick)+SP_time).datetime
            else:
                SP_time=tt-(tt/PS_ratio)
                P_pick=(sta_picks[0].time-SP_time).datetime
                S_pick=sta_picks[0].time.datetime
        else:
            if len([p for p in sta_picks if p.phase=='P']) > 1:
                warnings.warn('Multiple P picks found for station '+station+\
                                ', will use earliest')
                P_pick=min([p.time for p in sta_picks if p.phase=='P'])
                channel=sta_picks[0].channel
            else:
                P_pick=[p.time for p in sta_picks if p.phase=='P'][0]
                channel=sta_picks[0].channel
            if len([p for p in sta_picks if p.phase=='S']) > 1:
                warnings.warn('Multiple S picks found for station '+station+\
                                ', will use earliest')
                S_pick=min([p.time for p in sta_picks if p.phase=='S'])
            else:
                S_pick=[p.time for p in sta_picks if p.phase=='S'][0]
            if P_pick > S_pick:
                raise ValueError('P pick is after S pick')
            SP_time=(S_pick.datetime-P_pick.datetime).total_seconds()
        # Loop through the picks available
        for p in sta_picks:
            tr=Trace(synth_seis.seis_sim(int(SP_time*samp_rate),\
                        flength=length*samp_rate, phaseout=p.phase))
            tr.stats.sampling_rate=samp_rate
            tr.stats.station=station
            tr.stats.channel=p.channel
            # Synthetics start 10 samples before the relevant phase arrival
            if p.phase in ['all', 'P']:
                tr.stats.starttime=UTCDateTime(P_pick)-(10.0/samp_rate)
            else:
                tr.stats.starttime=UTCDateTime(S_pick)-(10.0/samp_rate)
            synths+=tr
    return synths
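A quick check of the S-P arithmetic used above: with PS_ratio = ts/tp, the S-P time is tp*(PS_ratio - 1) from a P travel time, or ts - ts/PS_ratio from an S travel time:

PS_ratio = 1.68
tp = 5.0                       # P travel time in seconds (hypothetical)
print (tp * PS_ratio) - tp     # 3.4 s S-P time from the P pick
ts = tp * PS_ratio
print ts - (ts / PS_ratio)     # the same 3.4 s recovered from the S pick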
Exemplo n.º 22
0
def Amp_pick_sfile(sfile,
                   datapath,
                   respdir,
                   chans=['Z'],
                   var_wintype=True,
                   winlen=0.9,
                   pre_pick=0.2,
                   pre_filt=True,
                   lowcut=1.0,
                   highcut=20.0,
                   corners=4):
    """
    Function to read information from a SEISAN s-file, load the data and the \
    picks, cut the data for the channels given around the S-window, simulate \
    a Wood Anderson seismometer, then pick the maximum peak-to-trough \
    amplitude.

    Output will be put into a mag_calc.out file which will be in full S-file \
    format and can be copied to a REA database.

    :type sfile: string
    :type datapath: string
    :param datapath: Path to the waveform files - usually the path to the WAV \
        directory
    :type respdir: string
    :param respdir: Path to the response information directory
    :type chans: List of strings
    :param chans: List of the channels to pick on, defaults to ['Z'] - should \
        just be the orientations, e.g. Z,1,2,N,E
    :type var_wintype: bool
    :param var_wintype: If True, the winlen will be \
        multiplied by the P-S time if both P and S picks are \
        available, otherwise it will be multiplied by the \
        hypocentral distance*0.34 - derived using a p-s ratio of \
        1.68 and S-velocity of 1.5km/s to give a large window, \
        defaults to True
    :type winlen: float
    :param winlen: Length of window, see above parameter; if var_wintype is \
        False then this will be in seconds, otherwise it is the \
        multiplier to the p-s time, defaults to 0.9.
    :type pre_pick: float
    :param pre_pick: Time before the s-pick to start the cut window, defaults \
        to 0.2
    :type pre_filt: bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: int
    :param corners: Number of corners to use in the pre-filter
    """
    # Hardwire a p-s multiplier of hypocentral distance based on p-s ratio of
    # 1.68 and an S-velocity of 1.5km/s, deliberately chosen to be quite slow
    ps_multiplier = 0.34
    from eqcorrscan.utils import Sfile_util
    from obspy import read
    from scipy.signal import iirfilter
    from obspy.signal.invsim import paz2AmpValueOfFreqResp
    import warnings
    # First we need to work out what stations have what picks
    event = Sfile_util.readpicks(sfile)[0]
    # Convert these picks into a lists
    stations = []  # List of stations
    channels = []  # List of channels
    picktimes = []  # List of pick times
    picktypes = []  # List of pick types
    distances = []  # List of hypocentral distances
    picks_out = []
    for pick in event.picks:
        if pick.phase_hint in ['P', 'S']:
            picks_out.append(pick)  # Need to be able to remove this if there
            # isn't data for a station!
            stations.append(pick.waveform_id.station_code)
            channels.append(pick.waveform_id.channel_code)
            picktimes.append(pick.time)
            picktypes.append(pick.phase_hint)
            arrival = [
                arrival for arrival in event.origins[0].arrivals
                if arrival.pick_id == pick.resource_id
            ][0]  # Take the matching Arrival object, not the list
            distances.append(arrival.distance)
    # Read in waveforms
    stream = read(datapath + '/' + Sfile_util.readwavename(sfile)[0])
    if len(Sfile_util.readwavename(sfile)) > 1:
        # Skip the first wavefile, which has already been read in
        for wavfile in Sfile_util.readwavename(sfile)[1:]:
            stream += read(datapath + '/' + wavfile)
    stream.merge()  # merge the data, just in case!
    # For each station cut the window
    uniq_stas = list(set(stations))
    del arrival
    for sta in uniq_stas:
        for chan in chans:
            print 'Working on ' + sta + ' ' + chan
            tr = stream.select(station=sta, channel='*' + chan)
            if not tr:
                # Remove picks from file
                # picks_out=[picks_out[i] for i in xrange(len(picks))\
                # if picks_out[i].station+picks_out[i].channel != \
                # sta+chan]
                warnings.warn('There is no station and channel match in the ' +
                              'wavefile!')
                break
            else:
                tr = tr[0]
            # Apply the pre-filter
            if pre_filt:
                try:
                    tr.detrend('simple')
                except:
                    dummy = tr.split()
                    dummy.detrend('simple')
                    tr = dummy.merge()[0]
                tr.filter('bandpass',
                          freqmin=lowcut,
                          freqmax=highcut,
                          corners=corners)
            sta_picks = [
                i for i in xrange(len(stations)) if stations[i] == sta
            ]
            pick_id = event.picks[sta_picks[0]].resource_id
            arrival = [
                arrival for arrival in event.origins[0].arrivals
                if arrival.pick_id == pick_id
            ][0]  # Take the matching Arrival object, not the list
            hypo_dist = arrival.distance
            CAZ = arrival.azimuth
            if var_wintype:
                if 'S' in [picktypes[i] for i in sta_picks] and\
                   'P' in [picktypes[i] for i in sta_picks]:
                    # If there is an S-pick we can use this :D
                    S_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'S'
                    ]
                    S_pick = min(S_pick)
                    P_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'P'
                    ]
                    P_pick = min(P_pick)
                    try:
                        tr.trim(starttime=S_pick - pre_pick,
                                endtime=S_pick + (S_pick - P_pick) * winlen)
                    except:
                        break
                elif 'S' in [picktypes[i] for i in sta_picks]:
                    S_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'S'
                    ]
                    S_pick = min(S_pick)
                    P_modelled = S_pick - hypo_dist * ps_multiplier
                    try:
                        tr.trim(starttime=S_pick - pre_pick,
                                endtime=S_pick +
                                (S_pick - P_modelled) * winlen)
                    except:
                        break
                else:
                    # In this case we only have a P pick
                    P_pick = [
                        picktimes[i] for i in sta_picks if picktypes[i] == 'P'
                    ]
                    P_pick = min(P_pick)
                    S_modelled = P_pick + hypo_dist * ps_multiplier
                    try:
                        tr.trim(starttime=S_modelled - pre_pick,
                                endtime=S_modelled +
                                (S_modelled - P_pick) * winlen)
                    except:
                        break
                # Work out the window length based on p-s time or distance
            elif 'S' in [picktypes[i] for i in sta_picks]:
                # If the window is fixed we still need to find the start time,
                # which can be based either on the S-pick (this elif), or
                # on the hypocentral distance and the P-pick

                # Take the minimum S-pick time if more than one S-pick is
                # available
                S_pick = [
                    picktimes[i] for i in sta_picks if picktypes[i] == 'S'
                ]
                S_pick = min(S_pick)
                try:
                    tr.trim(starttime=S_pick - pre_pick,
                            endtime=S_pick + winlen)
                except:
                    break
            else:
                # In this case, there is no S-pick and the window length is
                # fixed we need to calculate an expected S_pick based on the
                # hypocentral distance, this will be quite hand-wavey as we
                # are not using any kind of velocity model.
                P_pick = [
                    picktimes[i] for i in sta_picks if picktypes[i] == 'P'
                ]
                P_pick = min(P_pick)
                hypo_dist = [
                    distances[i] for i in sta_picks if picktypes[i] == 'P'
                ][0]
                S_modelled = P_pick + hypo_dist * ps_multiplier
                try:
                    tr.trim(starttime=S_modelled - pre_pick,
                            endtime=S_modelled + winlen)
                except:
                    break
            # Find the response information
            resp_info = _find_resp(tr.stats.station, tr.stats.channel,
                                   tr.stats.network, tr.stats.starttime,
                                   tr.stats.delta, respdir)
            PAZ = []
            seedresp = []
            if resp_info and 'gain' in resp_info:
                PAZ = resp_info
            elif resp_info:
                seedresp = resp_info
            # Simulate a Wood Anderson Seismograph
            if PAZ and len(tr.data) > 10:
                # Set ten data points to be the minimum to pass
                tr = _sim_WA(tr, PAZ, None, 10)
            elif seedresp and len(tr.data) > 10:
                tr = _sim_WA(tr, None, seedresp, 10)
            elif len(tr.data) > 10:
                warnings.warn('No PAZ for ' + tr.stats.station + ' ' +
                              tr.stats.channel + ' at time: ' +
                              str(tr.stats.starttime))
                continue
            if len(tr.data) <= 10:
                # Should remove the P and S picks if len(tr.data)==0
                warnings.warn('No data found for: ' + tr.stats.station)
                # print 'No data in miniseed file for '+tr.stats.station+\
                # ' removing picks'
                # picks_out=[picks_out[i] for i in xrange(len(picks_out))\
                # if i not in sta_picks]
                break
            # Get the amplitude
            amplitude, period, delay = _max_p2t(tr.data, tr.stats.delta)
            if amplitude == 0.0:
                break
            print 'Amplitude picked: ' + str(amplitude)
            # Note, amplitude should be in meters at the moment!
            # Remove the pre-filter response
            if pre_filt:
                # Generate poles and zeros for the filter we used earlier: this
                # is how the filter is designed in the convenience methods of
                # filtering in obspy.
                z, p, k = iirfilter(corners, [
                    lowcut / (0.5 * tr.stats.sampling_rate), highcut /
                    (0.5 * tr.stats.sampling_rate)
                ],
                                    btype='band',
                                    ftype='butter',
                                    output='zpk')
                filt_paz = {
                    'poles': list(p),
                    'zeros': list(z),
                    'gain': k,
                    'sensitivity': 1.0
                }
                amplitude /= (paz2AmpValueOfFreqResp(filt_paz, 1 / period) *
                              filt_paz['sensitivity'])
            # Convert amplitude to mm
            if PAZ:  # Divide by Gain to get to nm (returns pm? 10^-12)
                # amplitude *=PAZ['gain']
                amplitude /= 1000
            if seedresp:  # Seedresp method returns mm
                amplitude *= 1000000
            # Write out the half amplitude, approximately the peak amplitude as
            # used directly in magnitude calculations
            # Page 343 of Seisan manual:
            #   Amplitude (Zero-Peak) in units of nm, nm/s, nm/s^2 or counts
            amplitude *= 0.5
            # Generate a PICK type object for this pick
            picks_out.append(
                Sfile_util.PICK(station=tr.stats.station,
                                channel=tr.stats.channel,
                                impulsivity=' ',
                                phase='IAML',
                                weight='',
                                polarity=' ',
                                time=tr.stats.starttime + delay,
                                coda=999,
                                amplitude=amplitude,
                                peri=period,
                                azimuth=float('NaN'),
                                velocity=float('NaN'),
                                AIN=999,
                                SNR='',
                                azimuthres=999,
                                timeres=float('NaN'),
                                finalweight=999,
                                distance=hypo_dist,
                                CAZ=CAZ))
    # Copy the header from the sfile to a new local S-file
    fin = open(sfile, 'r')
    fout = open('mag_calc.out', 'w')
    for line in fin:
        fout.write(line)
        if line[79] == '7':
            # Stop copying at the end of the header (type-7 line)
            break
    fin.close()
    for pick in picks_out:
        # Note this uses the legacy PICK class, which formats itself as an
        # S-file line when converted to a string
        fout.write(str(pick))
    fout.close()
    # Write picks out to new s-file
    for pick in picks_out:
        print pick
    # Sfile_util.populateSfile('mag_calc.out', picks_out)
    return picks_out
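The pre-filter correction above can be checked in isolation. A minimal sketch, assuming a hypothetical picked period of 0.2 s and a measured amplitude of 1e-6 m, that designs the same Butterworth band-pass and removes its gain at the picked frequency:

from scipy.signal import iirfilter
from obspy.signal.invsim import paz2AmpValueOfFreqResp

samp_rate, corners, lowcut, highcut = 100.0, 4, 1.0, 20.0
period = 0.2        # hypothetical picked period in seconds
z, p, k = iirfilter(corners, [lowcut / (0.5 * samp_rate),
                              highcut / (0.5 * samp_rate)],
                    btype='band', ftype='butter', output='zpk')
filt_paz = {'poles': list(p), 'zeros': list(z), 'gain': k, 'sensitivity': 1.0}
gain = paz2AmpValueOfFreqResp(filt_paz, 1 / period) * filt_paz['sensitivity']
amplitude = 1.0e-6  # hypothetical measured amplitude in m
amplitude /= gain   # undo the pre-filter's response at 1/period = 5 Hz
print amplitude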
Exemplo n.º 23
0
def match_synth(sfile, cont_base, freqmin=2.0, freqmax=10.0, samp_rate=100.0,\
                threshold=8.0, threshold_type='MAD', trig_int=6.0, plotvar=True,\
                save_template=True):
    """
    Function to generate a basic synthetic from a real event, given by an
    s-file, and cross-correlate it with the day of continuous data containing
    the event.

    :type sfile: str
    :param sfile: Path to the s-file for the event
    :type cont_base: str
    :param cont_base: Path to the continuous data, should be in Yyyyy/Rjjj.01\
                directories
    :type freqmin: float
    :param freqmin: Low-cut for bandpass in Hz, defaults to 2.0
    :type freqmax: float
    :param freqmax: High-cut for bandpass in Hz, defaults to 10.0
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz, defaults to 100.0
    :type threshold: float
    :param threshold: Threshold for detection in cccsum, defaults to 8.0
    :type threshold_type: str
    :param threshold_type: Threshold type, either MAD or ABS, defaults to MAD
    :type trig_int: float
    :param trig_int: Trigger interval in seconds, defaults to 6.0
    :type plotvar: bool
    :param plotvar: To plot or not, defaults to True
    :type save_template: bool
    :param save_template: Whether to save the real and synthetic templates to\
                disk as miniseed files, defaults to True

    :returns: detections
    """
    # import matplotlib.pyplot as plt
    from eqcorrscan.core import match_filter, template_gen
    from eqcorrscan.utils import Sfile_util, pre_processing
    import glob
    from obspy import read, Stream, UTCDateTime
    from obspy.signal.cross_correlation import xcorr
    from joblib import Parallel, delayed
    from multiprocessing import cpu_count
    import numpy as np
    # Generate the synthetic
    synth_template=synth_from_sfile(sfile, samp_rate, length=1.0,\
                                    PS_ratio=1.68)
    synth_template.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
    for tr in synth_template:
        tr.data=(tr.data*1000).astype(np.int32)
    # Find the date from the sfile
    event_date=Sfile_util.readheader(sfile).time.datetime
    day=UTCDateTime(event_date.date())
    # Work out which stations we have template info for
    stachans=[(tr.stats.station, tr.stats.channel) for tr in synth_template]
    # Read in the day of data
    for stachan in stachans:
        wavfile=glob.glob(cont_base+event_date.strftime('/Y%Y/R%j.01/')+\
                            stachan[0]+'.*.'+stachan[1][0]+'?'+stachan[1][-1]+\
                            '.'+event_date.strftime('%Y.%j'))
        if len(wavfile) != 0:
            for wavf in wavfile:
                if 'st' not in locals():
                    st=read(wavf)
                else:
                    st+=read(wavf)
    st=st.merge(fill_value='interpolate')
    cores=cpu_count()
    if len(st) < cores:
        jobs=len(st)
    else:
        jobs=cores
    st=Parallel(n_jobs=jobs)(delayed(pre_processing.dayproc)(tr, freqmin,\
                                                             freqmax, 3,\
                                                             samp_rate, 0,\
                                                             day)
                            for tr in st)
    st=Stream(st)
    # Make the real template
    picks=Sfile_util.readpicks(sfile)
    real_template=template_gen._template_gen(picks, st, 1.0, 'all',\
                                            prepick=10/samp_rate)
    for tr in real_template:
        tr.data=tr.data.astype(np.int32)
    if save_template:
        real_template.write('Real_'+sfile.split('/')[-1], format='MSEED',\
                            encoding='STEIM2')
    # Shift the synthetic to better align with the real one
    for tr in real_template:
        synth_tr=synth_template.select(station=tr.stats.station,\
                                        channel=tr.stats.channel)[0]
        shift, corr = xcorr(tr.data, synth_tr.data, 20)
        print tr.stats.station+'.'+tr.stats.channel+\
            ' shift='+str(shift)+' samples, corr='+str(corr)
        if corr < 0:
            synth_tr.data*=-1
        # Apply a pad
        pad=np.zeros(abs(shift))
        if shift < 0:
            synth_tr.data=np.append(synth_tr.data, pad)[abs(shift):]
        elif shift > 0:
            synth_tr.data=np.append(pad, synth_tr.data)[0:-shift]
    if save_template:
        synth_template.write('Synthetic_'+sfile.split('/')[-1],
                            format='MSEED', encoding='STEIM2')
    # Now we have processed data and a template, we can try and detect!
    detections=match_filter.match_filter(['Synthetic_'+sfile.split('/')[-1],
                                        'Real_'+sfile.split('/')[-1]],\
                                        [synth_template, real_template],\
                                        st, threshold, \
                                        threshold_type, trig_int,\
                                        plotvar, 'synth_temp')
    f=open('Synthetic_test.csv', 'w')
    f.write('template, detect-time, cccsum, threshold, number of channels\n')
    for detection in detections:
        # output detections to file
        f.write(detection.template_name+', '+str(detection.detect_time)+\
                ', '+str(detection.detect_val)+', '+str(detection.threshold)+\
                ', '+str(detection.no_chans)+'\n')
        print 'template: '+detection.template_name+' detection at: '\
            +str(detection.detect_time)+' with a cccsum of: '+\
            str(detection.detect_val)
    if detections:
        f.write('\n')
    f.close()
    return detections
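The pad-and-shift alignment above generalises to a small helper; a minimal sketch (shift_trace is an illustrative name, not an EQcorrscan function):

import numpy as np

def shift_trace(data, shift):
    # Shift data by `shift` samples, zero-padding so the length is preserved
    if shift == 0:
        return data
    pad = np.zeros(abs(shift))
    if shift < 0:
        return np.append(data, pad)[abs(shift):]
    return np.append(pad, data)[0:-shift]

print shift_trace(np.array([1., 2., 3., 4.]), 1)   # [ 0.  1.  2.  3.]
print shift_trace(np.array([1., 2., 3., 4.]), -1)  # [ 2.  3.  4.  0.]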
Exemplo n.º 24
0
# Now we find the s-file we want to use to generate a template from
data_directory = os.path.join('test_data', 'tutorial_data')
sfiles = glob.glob(os.path.join(data_directory, '*L.S*'))
print sfiles

templates = []
template_names = []
for i, sfile in enumerate(sfiles):
    # Read in the picks from the S-file. Note: in the full case one of the main
    # functions in template_gen would be used rather than this, but for
    # the tutorial we will read in the data here - also note that this
    # template generation is inefficient for multiple templates; if using
    # daylong data for multiple templates you would want to read the seismic
    # data once and cut it multiple times (see the sketch after this example).
    picks = Sfile_util.readpicks(sfile)
    for pick in picks:
        print pick
        if 'wavefiles' not in locals():
            wavefiles = glob.glob(
                os.path.join(data_directory, '.'.join([pick.station, '*'])))
        else:
            wavefiles += glob.glob(
                os.path.join(data_directory, '.'.join([pick.station, '*'])))
    wavefiles = list(set(wavefiles))
    for wavefile in wavefiles:
        print ' '.join(['Reading data from', wavefile])
        if 'st' not in locals():
            st = read(wavefile)
        else:
            st += read(wavefile)
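A hedged sketch of the 'read once, cut many times' pattern recommended in the comment above, assuming st already holds the processed daylong stream, sfiles is the list found earlier, and _template_gen is imported as in the other examples (the 1.0 s length and 'all' phase selection are arbitrary choices for illustration):

from eqcorrscan.utils import Sfile_util

templates = []
for sfile in sfiles:
    picks = Sfile_util.readpicks(sfile)
    # Cut from a copy so every template is extracted from the same raw stream
    templates.append(_template_gen(picks, st.copy(), 1.0, 'all'))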
Exemplo n.º 25
0
def from_sfile(sfile,
               lowcut,
               highcut,
               samp_rate,
               filt_order,
               length,
               swin,
               debug=0):
    r"""Function to read in picks from sfile then generate the template from the
    picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the\
    path to a seisan nordic type s-file containing waveform and pick\
    information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in\
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import Sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = Sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    # Swap the REA directory for the WAV directory to find the wavefiles
    pathparts = ['WAV' if part == 'REA' else part for part in pathparts]
    wavpath = '/'.join(pathparts) + '/'
    # Read in waveform file
    for wavefile in wavefiles:
        print ''.join(
            ["I am going to read waveform data from: ", wavpath, wavefile])
        if 'st' not in locals():
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print 'Sampling rate of data is lower than sampling rate asked for'
            print 'Not good practice for correlations: I will not do this'
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    picks = Sfile_util.readpicks(sfile)
    print "I have found the following picks"
    for pick in picks:
        print ' '.join(
            [pick.station, pick.channel, pick.phase,
             str(pick.time)])

    # Process waveform data
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order, samp_rate,
                                  debug)
    st1 = _template_gen(picks, st, length, swin)
    return st1
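A hypothetical usage sketch for from_sfile; the S-file path and the filter parameters below are illustrative only:

template = from_sfile('REA/TEST_/2015/03/15-0931-08L.S201503',
                      lowcut=2.0, highcut=10.0, samp_rate=50.0,
                      filt_order=4, length=6.0, swin='all')
template.write('template.ms', format='MSEED')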