Example #1
def Noise_plotting(station, channel, PAZ, datasource):
    """
    Function to make use of obspy's PPSD functionality to read in data from
    a single station and the poles-and-zeros for that station before plotting
    the PPSD for this station.  See McNamara (2004) for more details.

    :type station: String
    :param station: Station name as it is in the filenames in the database
    :type channel: String
    :param channel: Channel name as it is in the filenames in the database
    :type PAZ: Dict
    :param PAZ: Must contain, Poles, Zeros, Sensitivity, Gain
        :type Poles: List of Complex
        :type Zeros: List of Complex
        :type Sensitivity: Float
        :type Gain: Float
    :type datasource: String
    :param datasource: The directory in which data can be found, can contain
                        wildcards.

    :returns: PPSD object
    """
    from obspy.signal import PPSD
    from obspy import read as obsread
    import glob

    stafiles = glob.glob(datasource + '/*' + station + '*' + channel + '*')
    stafiles.sort()
    # Initialize PPSD
    st = obsread(stafiles[0])
    ppsd = PPSD(st[0].stats, PAZ)
    for stafile in stafiles[1:]:
        print('Adding waveform from: ' + stafile)
        st = obsread(stafile)
        # Add after read to conserve memory
        ppsd.add(st)
    # Plot the PPSD
    ppsd.plot()
    return ppsd
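
# Usage sketch (not from the original source): the station, channel, data
# path, and response values below are placeholders, not a real instrument.
# Key names follow ObsPy's usual lowercase paz convention ('poles', 'zeros',
# 'gain', 'sensitivity') - check this against your ObsPy version.
paz = {'poles': [-4.44 + 4.44j, -4.44 - 4.44j],
       'zeros': [0j, 0j],
       'sensitivity': 6.29e8,
       'gain': 1.0}
ppsd = Noise_plotting(station='FOZ', channel='HHZ', PAZ=paz,
                      datasource='/path/to/day_volumes')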
Example #3
def blanksfile(wavefile,
               evtype,
               userID,
               outdir,
               overwrite=False,
               evtime=False):
    """
    Generate an empty s-file with a populated header for a given waveform.

    :type wavefile: str
    :param wavefile: Wavefile to associate with this S-file, the timing of \
        the S-file will be taken from this file if evtime is not set.
    :type evtype: str
    :param evtype: Event type letter code, e.g. L, R, D
    :type userID: str
    :param userID: 4-character SEISAN USER ID
    :type outdir: str
    :param outdir: Location to write S-file
    :type overwrite: bool
    :param overwrite: Overwrite an existing S-file, default=False
    :type evtime: obspy.core.utcdatetime.UTCDateTime
    :param evtime: If given this will set the timing of the S-file

    :returns: str, S-file name

    >>> from eqcorrscan.utils.sfile_util import readwavename
    >>> import os
    >>> wavefile = os.path.join('eqcorrscan', 'tests', 'test_data', 'WAV',
    ...                         'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
    >>> sfile = blanksfile(wavefile, 'L', 'TEST',
    ...                    '.', overwrite=True)
    Written s-file: ./01-0410-35L.S201309
    >>> readwavename(sfile)
    ['2013-09-01-0410-35.DFDPC_024_00']
    """

    from obspy import read as obsread
    import os
    import datetime

    if not evtime:
        try:
            st = obsread(wavefile)
            evtime = st[0].stats.starttime
        except Exception:
            raise IOError('Wavefile: ' + wavefile +
                          ' is invalid, try again with real data.')
    # Check that user ID is the correct length
    if len(userID) != 4:
        raise IOError('User ID must be 4 characters long')
    # Check that outdir exists
    if not os.path.isdir(outdir):
        raise IOError('Out path does not exist, I will not create this: ' +
                      outdir)
    # Check that evtype is one of L,R,D
    if evtype not in ['L', 'R', 'D']:
        raise IOError('Event type must be either L, R or D')

    # Generate s-file name in the format dd-hhmm-ss[L,R,D].Syyyymm
    sfile = outdir + '/' + str(evtime.day).zfill(2) + '-' +\
        str(evtime.hour).zfill(2) +\
        str(evtime.minute).zfill(2) + '-' +\
        str(evtime.second).zfill(2) + evtype + '.S' +\
        str(evtime.year) +\
        str(evtime.month).zfill(2)
    # Check if sfile exists
    if os.path.isfile(sfile) and not overwrite:
        print('Desired sfile: ' + sfile + ' exists, will not overwrite')
        for i in range(1, 10):
            sfile = outdir + '/' + str(evtime.day).zfill(2) + '-' +\
                str(evtime.hour).zfill(2) +\
                str(evtime.minute).zfill(2) + '-' +\
                str(evtime.second + i).zfill(2) + evtype + '.S' +\
                str(evtime.year) +\
                str(evtime.month).zfill(2)
            if not os.path.isfile(sfile):
                break
        else:
            msg = 'Tried generating files up to 9 s in advance and found ' +\
                'they all exist, you need to clean your stuff up!'
            raise IOError(msg)
        # sys.exit()
    f = open(sfile, 'w')
    # Write line 1 of s-file
    f.write(
        str(' ' + str(evtime.year) + ' ' + str(evtime.month).rjust(2) +
            str(evtime.day).rjust(2) + ' ' + str(evtime.hour).rjust(2) +
            str(evtime.minute).rjust(2) + ' ' +
            str(float(evtime.second)).rjust(4) + ' ' + evtype + '1'.rjust(58) +
            '\n'))
    # Write line 2 of s-file
    f.write(
        str(' ACTION:ARG ' + str(datetime.datetime.now().year)[2:4] + '-' +
            str(datetime.datetime.now().month).zfill(2) + '-' +
            str(datetime.datetime.now().day).zfill(2) + ' ' +
            str(datetime.datetime.now().hour).zfill(2) + ':' +
            str(datetime.datetime.now().minute).zfill(2) + ' OP:' +
            userID.ljust(4) + ' STATUS:' + 'ID:'.rjust(18) + str(evtime.year) +
            str(evtime.month).zfill(2) + str(evtime.day).zfill(2) +
            str(evtime.hour).zfill(2) + str(evtime.minute).zfill(2) +
            str(evtime.second).zfill(2) + 'I'.rjust(6) + '\n'))
    # Write line 3 of s-file
    write_wavfile = wavefile.split(os.sep)[-1]
    f.write(
        str(' ' + write_wavfile + '6'.rjust(79 - len(write_wavfile)) + '\n'))
    # Write final line of s-file
    f.write(
        str(' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU' +
            ' VELO AIN AR TRES W  DIS CAZ7\n'))
    f.close()
    print('Written s-file: ' + sfile)
    return sfile
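
# Sketch (not from the original source): if the waveform header times are
# unreliable, evtime can be passed explicitly so the S-file name and header
# are built from it instead. The wavefile path here is hypothetical.
from obspy import UTCDateTime
sfile = blanksfile('/path/to/2013-09-01-0410-35.DFDPC_024_00', 'L', 'TEST',
                   '.', overwrite=True,
                   evtime=UTCDateTime('2013-09-01T04:10:35'))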
Example #4
def brightness(stations, nodes, lags, stream, threshold, thresh_type,
               template_length, template_saveloc, coherence_thresh,
               coherence_stations=['all'], coherence_clip=False,
               gap=2.0, clip_level=100, instance=0, pre_pick=0.2,
               plotsave=True, cores=1):
    r"""Function to calculate the brightness function in terms of energy for \
    a day of data over the entire network for a given grid of nodes.

    Note data in stream must be all of the same length and have the same
    sampling rates.

    :type stations: list
    :param stations: List of station names in the form where stations[i] \
        refers to nodes[i][:] and lags[i][:]
    :type nodes: list, tuple
    :param nodes: List of node points where nodes[i] refers to stations[i] \
        and nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is \
        longitude in degrees, nodes[:][:][2] is depth in km.
    :type lags: :class: 'numpy.array'
    :param lags: Array of arrays where lags[i][:] refers to stations[i]. \
        lags[i][j] should be the delay to the nodes[i][j] for stations[i] in \
        seconds.
    :type stream: :class: `obspy.Stream`
    :param stream: Data through which to look for detections.
    :type threshold: float
    :param threshold: Threshold value for detection of template within the \
        brightness function
    :type thresh_type: str
    :param thresh_type: Either MAD or abs where MAD is the Median Absolute \
        Deviation and abs is an absolute brightness.
    :type template_length: float
    :param template_length: Length of template to extract in seconds
    :type template_saveloc: str
    :param template_saveloc: Path of where to save the templates.
    :type coherence_thresh: tuple of floats
    :param coherence_thresh: Threshold for removing incoherent peaks in the \
            network response, those below this will not be used as templates. \
            Must be in the form of (a,b) where the coherence is given by: \
            a-kchan/b where kchan is the number of channels used to compute \
            the coherence
    :type coherence_stations: list
    :param coherence_stations: List of stations to use in the coherence \
            thresholding - defaults to 'all' which uses all the stations.
    :type coherence_clip: tuple
    :param coherence_clip: Start and end in seconds of data to window around, \
            defaults to False, which uses all the data given.
    :type pre_pick: float
    :param pre_pick: Seconds before the detection time to include in template
    :type plotsave: bool
    :param plotsave: Save or show plots, if False will try and show the plots \
            on screen - as this is designed for bulk use this is set to \
            True to save any plots rather than show them if you create \
            them - changes the backend of matplotlib, so if is set to \
            False you will see NO PLOTS!
    :type cores: int
    :param cores: Number of cores to use, defaults to 1.
    :type clip_level: float
    :param clip_level: Multiplier applied to the mean deviation of the energy \
                    as an upper limit, used to remove spikes (earthquakes, \
                    lightning, electrical spikes) from the energy stack.
    :type gap: float
    :param gap: Minimum inter-event time in seconds for detections

    :return: list of templates as :class: `obspy.Stream` objects
    """
    from eqcorrscan.core.template_gen import _template_gen
    if plotsave:
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        plt.ioff()
    # from joblib import Parallel, delayed
    from multiprocessing import Pool, cpu_count
    from copy import deepcopy
    import numpy as np
    import obspy
    from obspy import read as obsread
    from obspy.core.event import Catalog, Event, Pick, WaveformStreamID, Origin
    from obspy.core.event import EventDescription, CreationInfo, Comment
    import matplotlib.pyplot as plt
    from eqcorrscan.utils import plotting
    # Check that we actually have the correct stations
    realstations = []
    for station in stations:
        st = stream.select(station=station)
        if st:
            realstations.append(station)
    del st
    stream_copy = stream.copy()
    # Force convert to int16
    for tr in stream_copy:
        # int16 max range is +/- 32767
        if max(abs(tr.data)) > 32767:
            tr.data = 32767 * (tr.data / max(abs(tr.data)))
            # Make sure that the data aren't clipped if they are high-gain -
            # scale the data
        tr.data = tr.data.astype(np.int16)
    # The internal _node_loop converts energy to int16 too to conserve memory;
    # to do this it forces the maximum of a single energy trace to be 500 and
    # normalises to this level - this only works for fewer than 65 channels of
    # data
    if len(stream_copy) > 130:
        raise OverflowError('Too many streams, either re-code and cope with ' +
                            'either more memory usage, or less precision, or ' +
                            'reduce data volume')
    detections = []
    detect_lags = []
    parallel = True
    plotvar = True
    mem_issue = False
    # Loop through each node in the input
    # Linear run
    print('Computing the energy stacks')
    if not parallel:
        for i in range(0, len(nodes)):
            print(i)
            if not mem_issue:
                j, a = _node_loop(stations, lags[:, i], stream, plot=True)
                if 'energy' not in locals():
                    energy = a
                else:
                    energy = np.concatenate((energy, a), axis=0)
                print('energy: ' + str(np.shape(energy)))
            else:
                j, filename = _node_loop(stations, lags[:, i], stream, i,
                                         mem_issue)
        energy = np.array(energy)
        print(np.shape(energy))
    else:
        # Parallel run
        num_cores = cores
        if num_cores > len(nodes):
            num_cores = len(nodes)
        if num_cores > cpu_count():
            num_cores = cpu_count()
        pool = Pool(processes=num_cores)
        results = [pool.apply_async(_node_loop, args=(stations, lags[:, i],
                                                      stream, i, clip_level,
                                                      mem_issue, instance))
                   for i in range(len(nodes))]
        pool.close()
        if not mem_issue:
            print('Computing the cumulative network response from memory')
            energy = [p.get() for p in results]
            pool.join()
            energy.sort(key=lambda tup: tup[0])
            energy = [node[1] for node in energy]
            energy = np.concatenate(energy, axis=0)
            print(energy.shape)
        else:
            pool.join()
    # Now compute the cumulative network response and then detect possible
    # events
    if not mem_issue:
        print(energy.shape)
        indeces = np.argmax(energy, axis=0)  # Indices of maximum energy
        print(indeces.shape)
        cum_net_resp = np.array([np.nan] * len(indeces))
        cum_net_resp[0] = energy[indeces[0]][0]
        peak_nodes = [nodes[indeces[0]]]
        for i in range(1, len(indeces)):
            cum_net_resp[i] = energy[indeces[i]][i]
            peak_nodes.append(nodes[indeces[i]])
        del energy, indeces
    else:
        print('Reading the temp files and computing network response')
        node_splits = int(len(nodes) // num_cores)
        indeces = [range(node_splits)]
        for i in range(1, num_cores - 1):
            indeces.append(range(node_splits * i, node_splits * (i + 1)))
        indeces.append(range(node_splits * (i + 1), len(nodes)))
        pool = Pool(processes=num_cores)
        results = [pool.apply_async(_cum_net_resp, args=(indeces[i], instance))
                   for i in range(num_cores)]
        pool.close()
        results = [p.get() for p in results]
        pool.join()
        responses = [result[0] for result in results]
        print(np.shape(responses))
        node_indeces = [result[1] for result in results]
        cum_net_resp = np.array(responses)
        indeces = np.argmax(cum_net_resp, axis=0)
        print(indeces.shape)
        print(cum_net_resp.shape)
        cum_net_resp = np.array([cum_net_resp[indeces[i]][i]
                                 for i in range(len(indeces))])
        peak_nodes = [nodes[node_indeces[indeces[i]][i]]
                      for i in range(len(indeces))]
        del indeces, node_indeces
    if plotvar:
        cum_net_trace = deepcopy(stream[0])
        cum_net_trace.data = cum_net_resp
        cum_net_trace.stats.station = 'NR'
        cum_net_trace.stats.channel = ''
        cum_net_trace.stats.network = 'Z'
        cum_net_trace.stats.location = ''
        cum_net_trace.stats.starttime = stream[0].stats.starttime
        cum_net_trace = obspy.Stream(cum_net_trace)
        cum_net_trace += stream.select(channel='*N')
        cum_net_trace += stream.select(channel='*1')
        cum_net_trace.sort(['network', 'station', 'channel'])
        # np.save('cum_net_resp.npy',cum_net_resp)
        #     cum_net_trace.plot(size=(800,600), equal_scale=False,\
        #                        outfile='NR_timeseries.eps')

    # Find detection within this network response
    print('Finding detections in the cumulative network response')
    detections = _find_detections(cum_net_resp, peak_nodes, threshold,
                                  thresh_type, stream[0].stats.sampling_rate,
                                  realstations, gap)
    del cum_net_resp
    templates = []
    nodesout = []
    good_detections = []
    if detections:
        print('Converting detections into templates')
        # Generate a catalog of detections
        detections_cat = Catalog()
        for j, detection in enumerate(detections):
            print('Converting for detection ' + str(j) + ' of ' +
                  str(len(detections)))
            # Create an event for each detection
            event = Event()
            # Set up some header info for the event
            event.event_descriptions.append(EventDescription())
            event.event_descriptions[0].text = 'Brightness detection'
            event.creation_info = CreationInfo(agency_id='EQcorrscan')
            copy_of_stream = deepcopy(stream_copy)
            # Convert detections to obspy.core.event type -
            # name of detection template is the node.
            node = (detection.template_name.split('_')[0],
                    detection.template_name.split('_')[1],
                    detection.template_name.split('_')[2])
            print(node)
            # Look up node in nodes and find the associated lags
            index = nodes.index(node)
            detect_lags = lags[:, index]
            ksta = Comment(text='Number of stations=' + str(len(detect_lags)))
            event.origins.append(Origin())
            event.origins[0].comments.append(ksta)
            event.origins[0].time = copy_of_stream[0].stats.starttime +\
                detect_lags[0] + detection.detect_time
            event.origins[0].latitude = node[0]
            event.origins[0].longitude = node[1]
            event.origins[0].depth = node[2]
            for i, detect_lag in enumerate(detect_lags):
                station = stations[i]
                st = copy_of_stream.select(station=station)
                if len(st) != 0:
                    for tr in st:
                        _waveform_id = WaveformStreamID(station_code=tr.stats.
                                                        station,
                                                        channel_code=tr.stats.
                                                        channel,
                                                        network_code='NA')
                        event.picks.append(Pick(waveform_id=_waveform_id,
                                                time=tr.stats.starttime +
                                                detect_lag +
                                                detection.detect_time +
                                                pre_pick,
                                                onset='emergent',
                                                evaluation_mode='automatic'))
            print('Generating template for detection: ' + str(j))
            template = (_template_gen(event.picks, copy_of_stream,
                        template_length, 'all'))
            template_name = template_saveloc + '/' +\
                str(template[0].stats.starttime) + '.ms'
            # In the interests of RAM conservation we write then read
            # Check coherency here!
            temp_coher, kchan = coherence(template, coherence_stations,
                                          coherence_clip)
            coh_thresh = float(coherence_thresh[0]) - kchan / \
                float(coherence_thresh[1])
            if temp_coher > coh_thresh:
                template.write(template_name, format="MSEED")
                print('Written template as: ' + template_name)
                print('---------------------------------coherence LEVEL: ' +
                      str(temp_coher))
                coherent = True
            else:
                print('Template was incoherent, coherence level: ' +
                      str(temp_coher))
                coherent = False
            del copy_of_stream, tr, template
            if coherent:
                templates.append(obsread(template_name))
                nodesout += [node]
                good_detections.append(detection)
            else:
                print('No template for you')
    if plotvar:
        all_detections = [(cum_net_trace[-1].stats.starttime +
                           detection.detect_time).datetime
                          for detection in detections]
        good_detections = [(cum_net_trace[-1].stats.starttime +
                            detection.detect_time).datetime
                           for detection in good_detections]
        if not plotsave:
            plotting.NR_plot(cum_net_trace[0:-1],
                             obspy.Stream(cum_net_trace[-1]),
                             detections=good_detections,
                             size=(18.5, 10),
                             title='Network response')
            # cum_net_trace.plot(size=(800,600), equal_scale=False)
        else:
            savefile = 'plots/' +\
                cum_net_trace[0].stats.starttime.datetime.strftime('%Y%m%d') +\
                '_NR_timeseries.pdf'
            plotting.NR_plot(cum_net_trace[0:-1],
                             obspy.Stream(cum_net_trace[-1]),
                             detections=good_detections,
                             size=(18.5, 10), save=savefile,
                             title='Network response')
    nodesout = list(set(nodesout))
    return templates, nodesout
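
# Call sketch (not from the original source): the station names, node grid,
# lag matrix, and data path are invented placeholders showing the expected
# shapes - lags[i][j] is the travel-time delay from stations[i] to nodes[j].
import numpy as np
from obspy import read

stations = ['FOZ', 'JCZ']  # must match stations present in the stream
nodes = [(-43.50, 170.20, 5.0), (-43.50, 170.30, 5.0),
         (-43.60, 170.20, 5.0), (-43.60, 170.30, 5.0)]
lags = np.array([[0.50, 0.60, 0.70, 0.80],   # lags[0][:] -> FOZ
                 [0.45, 0.55, 0.65, 0.75]])  # lags[1][:] -> JCZ
stream = read('/path/to/daylong_multiplexed.ms')  # hypothetical day of data
templates, used_nodes = brightness(
    stations, nodes, lags, stream, threshold=8.0, thresh_type='MAD',
    template_length=6.0, template_saveloc='templates',
    coherence_thresh=(0.5, 10.0), cores=2)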
Example #5
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,
               prepick=0.05, debug=0, plot=False):
    """
    Generate multiplexed template from a Nordic (Seisan) s-file.
    Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefile found in the pick file.

    :type sfile: str
    :param sfile: sfilename must be the \
        path to a seisan nordic type s-file containing waveform and pick \
        information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type prepick: float
    :param prepick: Length to extract prior to the pick in seconds.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.core.stream.Stream Newly cut template

    .. warning:: This will use whatever data is pointed to in the s-file, if \
        this is not the continuous data, we recommend using other functions. \
        Differences in processing between short files and day-long files \
        (inherent to resampling) will produce lower cross-correlations.

    .. rubric:: Example

    >>> from eqcorrscan.core.template_gen import from_sfile
    >>> sfile = 'eqcorrscan/tests/test_data/REA/TEST_/01-0411-15L.S201309'
    >>> template = from_sfile(sfile=sfile, lowcut=5.0, highcut=15.0,
    ...                       samp_rate=50.0, filt_order=4, swin='P',
    ...                       prepick=0.2, length=6)
    >>> print(len(template))
    15
    >>> print(template[0].stats.sampling_rate)
    50.0
    >>> template.plot(equal_scale=False, size=(800,600)) # doctest: +SKIP

    .. plot::

        from eqcorrscan.core.template_gen import from_sfile
        import os
        sfile = os.path.realpath('../../..') + \
            '/tests/test_data/REA/TEST_/01-0411-15L.S201309'
        template = from_sfile(sfile=sfile, lowcut=5.0, highcut=15.0,
                              samp_rate=50.0, filt_order=4, swin='P',
                              prepick=0.2, length=6)
        template.plot(equal_scale=False, size=(800, 600))
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    new_path_parts = []
    for part in pathparts:
        if part == 'REA':
            part = 'WAV'
        new_path_parts.append(part)
    main_wav_parts = []
    for part in new_path_parts:
        main_wav_parts.append(part)
        if part == 'WAV':
            break
    mainwav = os.path.join(*main_wav_parts) + os.path.sep
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*new_path_parts) + os.path.sep
    # In case of absolute paths (not handled with .split() --> .join())
    if sfile[0] == os.path.sep:
        wavpath = os.path.sep + wavpath
        mainwav = os.path.sep + mainwav
    # Read in waveform file
    for wavefile in wavefiles:
        if debug > 0:
            print(''.join(["I am going to read waveform data from: ", wavpath,
                           wavefile]))
        if 'st' not in locals():
            if os.path.isfile(wavpath + wavefile):
                st = obsread(wavpath + wavefile)
            elif os.path.isfile(wavefile):
                st = obsread(wavefile)
            else:
                # Read from the main WAV directory
                st = obsread(mainwav + wavefile)
        else:
            if os.path.isfile(wavpath + wavefile):
                st += obsread(wavpath + wavefile)
            elif os.path.isfile(wavefile):
                st += obsread(wavefile)
            else:
                st += obsread(mainwav + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked ' +
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    event = sfile_util.readpicks(sfile)
    # Read the list of Picks for this event
    picks = event.picks
    if debug > 0:
        print("I have found the following picks")
        for pick in picks:
            print(' '.join([pick.waveform_id.station_code,
                            pick.waveform_id.channel_code, pick.phase_hint,
                            str(pick.time)]))
    # Process waveform data
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order,
                                  samp_rate, debug)
    st1 = _template_gen(picks=picks, st=st, length=length, swin=swin,
                        prepick=prepick, plot=plot, debug=debug)
    return st1
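
# Illustration (not from the original source) of the REA -> WAV path
# substitution used above to locate waveforms, with the s-file path taken
# from the doctest; on POSIX this prints the mirrored WAV directory.
import os
sfile = 'eqcorrscan/tests/test_data/REA/TEST_/01-0411-15L.S201309'
path_parts = sfile.split('/')[0:-1]
wav_parts = ['WAV' if part == 'REA' else part for part in path_parts]
print(os.path.join(*wav_parts) + os.path.sep)
# eqcorrscan/tests/test_data/WAV/TEST_/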
Example #6
def brightness(
    stations,
    nodes,
    lags,
    stream,
    threshold,
    thresh_type,
    template_length,
    template_saveloc,
    coherence_thresh,
    coherence_stations=["all"],
    coherence_clip=False,
    gap=2.0,
    clip_level=100,
    instance=0,
    pre_pick=0.2,
    plotsave=True,
    cores=1,
):
    r"""Function to calculate the brightness function in terms of energy for\
    a day of data over the entire network for a given grid of nodes.

    Note data in stream must be all of the same length and have the same
    sampling rates.

    :type stations: list
    :param stations: List of station names in the form where stations[i]\
    refers to nodes[i][:] and lags[i][:]
    :type nodes: list, tuple
    :param nodes: List of node points where nodes[i] refers to stations[i]\
    and nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in\
    degrees, nodes[:][:][2] is depth in km.
    :type lags: :class: 'numpy.array'
    :param lags: Array of arrays where lags[i][:] refers to stations[i].\
    lags[i][j] should be the delay to the nodes[i][j] for stations[i] in\
    seconds.
    :type stream: :class: `obspy.Stream`
    :param stream: Data through which to look for detections.
    :type threshold: float
    :param threshold: Threshold value for detection of template within the\
    brightness function
    :type thresh_type: str
    :param thresh_type: Either MAD or abs where MAD is the Median Absolute\
    Deviation and abs is an absolute brightness.
    :type template_length: float
    :param template_length: Length of template to extract in seconds
    :type template_saveloc: str
    :param template_saveloc: Path of where to save the templates.
    :type coherence_thresh: tuple of floats
    :param coherence_thresh: Threshold for removing incoherent peaks in the\
            network response, those below this will not be used as templates.\
            Must be in the form of (a,b) where the coherence is given by:\
            a-kchan/b where kchan is the number of channels used to compute\
            the coherence
    :type coherence_stations: list
    :param coherence_stations: List of stations to use in the coherence\
            thresholding - defaults to 'all' which uses all the stations.
    :type coherence_clip: tuple
    :param coherence_clip: Start and end in seconds of data to window around,\
            defaults to False, which uses all the data given.
    :type pre_pick: float
    :param pre_pick: Seconds before the detection time to include in template
    :type plotsave: bool
    :param plotsave: Save or show plots, if False will try and show the plots\
            on screen - as this is designed for bulk use this is set to\
            True to save any plots rather than show them if you create\
            them - changes the backend of matplotlib, so if is set to\
            False you will see NO PLOTS!
    :type cores: int
    :param cores: Number of cores to use, defaults to 1.
    :type clip_level: float
    :param clip_level: Multiplier applied to the mean deviation of the energy\
                    as an upper limit, used to remove spikes (earthquakes, \
                    lightning, electrical spikes) from the energy stack.
    :type gap: float
    :param gap: Minimum inter-event time in seconds for detections

    :return: list of templates as :class: `obspy.Stream` objects
    """
    from eqcorrscan.core.template_gen import _template_gen

    if plotsave:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

        plt.ioff()
    # from joblib import Parallel, delayed
    from multiprocessing import Pool, cpu_count
    from eqcorrscan.utils.Sfile_util import PICK
    from copy import deepcopy
    import numpy as np
    import obspy
    from obspy import read as obsread
    import matplotlib.pyplot as plt
    from eqcorrscan.utils import EQcorrscan_plotting as plotting

    # Check that we actually have the correct stations
    realstations = []
    for station in stations:
        st = stream.select(station=station)
        if st:
            realstations.append(station)
    del st
    stream_copy = stream.copy()
    # Force convert to int16
    for tr in stream_copy:
        # int16 max range is +/- 32767
        if max(abs(tr.data)) > 32767:
            tr.data = 32767 * (tr.data / max(abs(tr.data)))
            # Make sure that the data aren't clipped if they are high-gain -
            # scale the data
        tr.data = tr.data.astype(np.int16)
    # The internal _node_loop converts energy to int16 too to conserve memory;
    # to do this it forces the maximum of a single energy trace to be 500 and
    # normalises to this level - this only works for fewer than 65 channels of
    # data
    if len(stream_copy) > 130:
        raise OverflowError(
            "Too many streams, either re-code and cope with "
            "either more memory usage, or less precision, or "
            "reduce data volume"
        )
    detections = []
    detect_lags = []
    parallel = True
    plotvar = True
    mem_issue = False
    # Loop through each node in the input
    # Linear run
    print "Computing the energy stacks"
    if not parallel:
        for i in range(0, len(nodes)):
            print i
            if not mem_issue:
                j, a = _node_loop(stations, lags[:, i], stream, plot=True)
                if "energy" not in locals():
                    energy = a
                else:
                    energy = np.concatenate((energy, a), axis=0)
                print "energy: " + str(np.shape(energy))
            else:
                j, filename = _node_loop(stations, lags[:, i], stream, i, mem_issue)
        energy = np.array(energy)
        print np.shape(energy)
    else:
        # Parallel run
        num_cores = cores
        if num_cores > len(nodes):
            num_cores = len(nodes)
        if num_cores > cpu_count():
            num_cores = cpu_count()
        pool = Pool(processes=num_cores, maxtasksperchild=None)
        results = [
            pool.apply_async(_node_loop, args=(stations, lags[:, i], stream, i, clip_level, mem_issue, instance))
            for i in range(len(nodes))
        ]
        pool.close()
        if not mem_issue:
            print "Computing the cumulative network response from memory"
            energy = [p.get() for p in results]
            pool.join()
            energy.sort(key=lambda tup: tup[0])
            energy = [node[1] for node in energy]
            energy = np.concatenate(energy, axis=0)
            print(energy.shape)
        else:
            pool.join()
    # Now compute the cumulative network response and then detect possible
    # events
    if not mem_issue:
        print(energy.shape)
        indeces = np.argmax(energy, axis=0)  # Indices of maximum energy
        print(indeces.shape)
        cum_net_resp = np.array([np.nan] * len(indeces))
        cum_net_resp[0] = energy[indeces[0]][0]
        peak_nodes = [nodes[indeces[0]]]
        for i in range(1, len(indeces)):
            cum_net_resp[i] = energy[indeces[i]][i]
            peak_nodes.append(nodes[indeces[i]])
        del energy, indeces
    else:
        print "Reading the temp files and computing network response"
        node_splits = len(nodes) / num_cores
        indeces = [range(node_splits)]
        for i in range(1, num_cores - 1):
            indeces.append(range(node_splits * i, node_splits * (i + 1)))
        indeces.append(range(node_splits * (i + 1), len(nodes)))
        pool = Pool(processes=num_cores, maxtasksperchild=None)
        results = [pool.apply_async(_cum_net_resp, args=(indeces[i], instance)) for i in range(num_cores)]
        pool.close()
        results = [p.get() for p in results]
        pool.join()
        responses = [result[0] for result in results]
        print(np.shape(responses))
        node_indeces = [result[1] for result in results]
        cum_net_resp = np.array(responses)
        indeces = np.argmax(cum_net_resp, axis=0)
        print(indeces.shape)
        print(cum_net_resp.shape)
        cum_net_resp = np.array([cum_net_resp[indeces[i]][i] for i in range(len(indeces))])
        peak_nodes = [nodes[node_indeces[indeces[i]][i]] for i in range(len(indeces))]
        del indeces, node_indeces
    if plotvar:
        cum_net_trace = deepcopy(stream[0])
        cum_net_trace.data = cum_net_resp
        cum_net_trace.stats.station = "NR"
        cum_net_trace.stats.channel = ""
        cum_net_trace.stats.network = "Z"
        cum_net_trace.stats.location = ""
        cum_net_trace.stats.starttime = stream[0].stats.starttime
        cum_net_trace = obspy.Stream(cum_net_trace)
        cum_net_trace += stream.select(channel="*N")
        cum_net_trace += stream.select(channel="*1")
        cum_net_trace.sort(["network", "station", "channel"])
        # np.save('cum_net_resp.npy',cum_net_resp)
        #     cum_net_trace.plot(size=(800,600), equal_scale=False,\
        #                        outfile='NR_timeseries.eps')

    # Find detection within this network response
    print "Finding detections in the cumulatve network response"
    detections = _find_detections(
        cum_net_resp, peak_nodes, threshold, thresh_type, stream[0].stats.sampling_rate, realstations, gap
    )
    del cum_net_resp
    templates = []
    nodesout = []
    good_detections = []
    if detections:
        print "Converting detections in to templates"
        for j, detection in enumerate(detections):
            print "Converting for detection " + str(j) + " of " + str(len(detections))
            copy_of_stream = deepcopy(stream_copy)
            # Convert detections to PICK type - name of detection template
            # is the node.
            node = (
                detection.template_name.split("_")[0],
                detection.template_name.split("_")[1],
                detection.template_name.split("_")[2],
            )
            print(node)
            # Look up node in nodes and find the associated lags
            index = nodes.index(node)
            detect_lags = lags[:, index]
            picks = []
            for i, detect_lag in enumerate(detect_lags):
                station = stations[i]
                st = copy_of_stream.select(station=station)
                if len(st) != 0:
                    for tr in st:
                        picks.append(
                            PICK(
                                station=station,
                                channel=tr.stats.channel,
                                impulsivity="E",
                                phase="S",
                                weight="3",
                                polarity="",
                                time=tr.stats.starttime + detect_lag + detection.detect_time - pre_pick,
                                coda="",
                                amplitude="",
                                peri="",
                                azimuth="",
                                velocity="",
                                AIN="",
                                SNR="",
                                azimuthres="",
                                timeres="",
                                finalweight="",
                                distance="",
                                CAZ="",
                            )
                        )
            print "Generating template for detection: " + str(j)
            template = _template_gen(picks, copy_of_stream, template_length, "all")
            template_name = template_saveloc + "/" + str(template[0].stats.starttime) + ".ms"
            # In the interests of RAM conservation we write then read
            # Check coherency here!
            temp_coher, kchan = coherence(template, coherence_stations, coherence_clip)
            coh_thresh = float(coherence_thresh[0]) - kchan / float(coherence_thresh[1])
            if temp_coher > coh_thresh:
                template.write(template_name, format="MSEED")
                print("Written template as: " + template_name)
                print("---------------------------------coherence LEVEL: " + str(temp_coher))
                coherent = True
            else:
                print("Template was incoherent, coherence level: " + str(temp_coher))
                coherent = False
            del copy_of_stream, tr, template
            if coherent:
                templates.append(obsread(template_name))
                nodesout += [node]
                good_detections.append(detection)
            else:
                print("No template for you")
    if plotvar:
        all_detections = [
            (cum_net_trace[-1].stats.starttime + detection.detect_time).datetime for detection in detections
        ]
        good_detections = [
            (cum_net_trace[-1].stats.starttime + detection.detect_time).datetime for detection in good_detections
        ]
        if not plotsave:
            plotting.NR_plot(
                cum_net_trace[0:-1],
                obspy.Stream(cum_net_trace[-1]),
                detections=good_detections,
                size=(18.5, 10),
                title="Network response",
            )
            # cum_net_trace.plot(size=(800,600), equal_scale=False)
        else:
            savefile = "plots/" + cum_net_trace[0].stats.starttime.datetime.strftime("%Y%m%d") + "_NR_timeseries.pdf"
            plotting.NR_plot(
                cum_net_trace[0:-1],
                obspy.Stream(cum_net_trace[-1]),
                detections=good_detections,
                size=(18.5, 10),
                save=savefile,
                title="Network response",
            )
    nodesout = list(set(nodesout))
    return templates, nodesout
Example #7
def cjc_trigger_routine(startdate, enddate, dataloc, trigloc, routype):
    """
    Module to run the obspy sta-lta energy based filter routine

    Must be passed a start date & end date as strings of the form yyyy/mm/dd,
    dataloc should be a string of the path for the input data,
    trigloc should be a string of the output path,
    routype should be a string denoting the type of detection routine to use,
        either classic or carl;
    defaults have been set in the module for trigger parameters.
    """

###############################################################################
    # Import parameter settings
    import sys
    sys.path.insert(0,"/home/calumch/my_programs/Building/rt2detection")
    from par import trigger_par as defaults
    print(defaults.stalen)
###############################################################################

# Format dates
    startyear=startdate.split('/')[0]
    startmonth=startdate.split('/')[1]
    startday=startdate.split('/')[2]
    endyear=enddate.split('/')[0]
    endmonth=enddate.split('/')[1]
    endday=enddate.split('/')[2]

# Import modules
    from obspy import read as obsread
    from obspy import UTCDateTime
    import glob, os
    import numpy as np
    from obspy.signal import coincidenceTrigger


# Generate list of days to check through
    lengthinseconds=UTCDateTime(endyear+' '+endmonth+' '+endday)-\
            UTCDateTime(startyear+' '+startmonth+' '+startday)
    lendays=lengthinseconds/86400
    lengthinseconds=[]
    dfiles=[]
    dates=[]
    for i in range(0,int(lendays)+1):
        dates.append(UTCDateTime(startyear+' '+startmonth+' '+startday)+(i*86400))
        dfiles.extend(glob.glob(dataloc+'/'+str(dates[i].year)+'/'+\
                str(dates[i].month).zfill(2)+'/'+str(dates[i].year)+'-'+\
                str(dates[i].month).zfill(2)+'-'+str(dates[i].day).zfill(2)+'*'))

    print(len(dfiles))
    wavelist = []  # Initialize list variable
    # Read in data
    for hfile in dfiles:
        print('Working on file: ' + hfile)
        st = obsread(hfile)
        st1=st.copy()
        if not defaults.comp=='all':
            st1=st1.select(channel='*'+defaults.comp)
        # De-mean data
        for tr in st:
            tr.data=tr.data-np.mean(tr.data)
        # Filter data
        st1.filter('bandpass',freqmin=defaults.lowcut,freqmax=defaults.highcut)
        # Use the obspy triggering routine
        trig = []
        if routype == 'classic':
            trig = coincidenceTrigger("recstalta", defaults.trigon,
                                      defaults.trigoff, st1, defaults.netsum,
                                      sta=defaults.stalen, lta=defaults.ltalen,
                                      delete_long_trigger=True,
                                      trigger_off_extension=defaults.netwin)
        else:
            try:
                trig = coincidenceTrigger("carlstatrig", defaults.trigon,
                                          defaults.trigoff, st1,
                                          defaults.netsum, sta=defaults.stalen,
                                          lta=defaults.ltalen,
                                          ratio=defaults.crat,
                                          quiet=defaults.cquite,
                                          delete_long_trigger=True)
            except Exception:
                print('Triggering routine failed, suggest altering parameters')
        # Cut data and write out in multiplexed miniseed files
        if trig and defaults.trigout=='Y':
            for event in trig:
                stout=st.slice(event['time']-defaults.precut,event['time']+defaults.postcut)
                filename=str(stout[0].stats.starttime.year)+'-'+\
                        str(stout[0].stats.starttime.month).zfill(2)+'-'+\
                        str(stout[0].stats.starttime.day).zfill(2)+'-'+\
                        str(stout[0].stats.starttime.hour).zfill(2)+\
                        str(stout[0].stats.starttime.minute).zfill(2)+'-'+\
                        str(stout[0].stats.starttime.second).zfill(2)+'.'+\
                        defaults.net+'_'+str(len(stout)).zfill(3)+'_00'
                if not os.path.isdir(trigloc+'/'+\
                        str(stout[0].stats.starttime.year)):
                    os.makedirs(trigloc+'/'+str(stout[0].stats.starttime.year))
                if not os.path.isdir(trigloc+'/'+str(stout[0].stats.starttime.year)\
                        +'/'+str(stout[0].stats.starttime.month).zfill(2)):
                    os.makedirs(trigloc+'/'+str(stout[0].stats.starttime.year)\
                            +'/'+str(stout[0].stats.starttime.month).zfill(2))
                filename=trigloc+'/'+str(stout[0].stats.starttime.year)+'/'+\
                        str(stout[0].stats.starttime.month).zfill(2)+'/'+\
                        filename
                wavelist.append(filename)
                try:
                    stout.write(filename,format="MSEED",encoding="STEIM2")
                except:
                    # Cope with dtype issues
                    for tr in stout:
                        tr.data = np.array(tr.data, dtype=np.int32)
                    stout.write(filename,format='MSEED',encoding='STEIM2')
                print('Written triggered file as: ' + filename)
        elif defaults.trigout == 'N':
            print('Triggers will not be written out but I made ' +
                  str(len(trig)) + ' detections')
        elif not trig:
            print('No triggers were detected')
    return wavelist
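
# Usage sketch (not from the original source): paths and dates here are
# hypothetical; trigger parameters come from the hard-coded rt2detection
# trigger_par module imported inside the function, and dates are strings
# split on '/', not UTCDateTime objects.
wavelist = cjc_trigger_routine(startdate='2014/07/01', enddate='2014/07/03',
                               dataloc='/path/to/continuous_archive',
                               trigloc='/path/to/triggered_output',
                               routype='classic')
print(str(len(wavelist)) + ' triggered files written')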
Example #8
        tr.stats.station = station
        tr.stats.channel = 'S1'
        tr.stats.network = 'SYN'
        tr.stats.sampling_rate = samp_rate
        tr.stats.starttime = starttime
        stream += tr
    ksta += 1
if realstr:
    # stream=obsread('scripts/brightness_test.ms')
    # stream.detrend('demean')
    # stream=obsread('/Volumes/GeoPhysics_09/users-data/chambeca/SAMBA_archive/day_volumes_S/'+\
    # 'Y2011/R247.01/*N.2011.247')
    # stream.detrend('demean')
    # stream.resample(samp_rate)
    # stream.write('scripts/brightness_test_daylong.ms',format='MSEED')
    stream = obsread('scripts/brightness_test_daylong.ms')
    stream.trim(starttime=UTCDateTime('2011-09-04 17:05:00'),\
                endtime=UTCDateTime('2011-09-04 17:15:00'))#, pad=True,\
    # fill_value=0)
    # for tr in stream:
    # if tr.stats.station=='WVZ':
    # stream.remove(tr)
stream.filter('bandpass', freqmin=4.0, freqmax=8.0)
# stream.trim(stream[0].stats.starttime+90, stream[0].stats.endtime)
stream.trim(stream[0].stats.starttime,
            stream[0].stats.endtime,
            pad=True,
            fill_value=0)
stream.plot(size=(800, 600), equal_scale=False)

instance = 0
Example #9
def blanksfile(wavefile, evtype, userID, outdir, overwrite):
    """
    Module to generate an empty s-file with a populated header for a given
    waveform.

###############################################################################

    # Arguments are the path of a wavefile (multiplexed miniseed file required)
    # Event type (L,R,D) and user ID (four characters as used in seisan)

###############################################################################

    # Example s-file format:
    # 2014  719  617 50.2 R                                                         1
    # ACTION:ARG 14-11-11 10:53 OP:CALU STATUS:               ID:20140719061750     I
    # 2014/07/2014-07-19-0617-50.SAMBA_030_00                                       6
    # STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU VELO AIN AR TRES W  DIS CAZ7
    """

    from obspy import read as obsread
    import sys, os, datetime
    try:
        st = obsread(wavefile)
    except Exception:
        print('Wavefile: ' + wavefile + ' is invalid, try again with real data.')
        sys.exit()
    # Check that user ID is the correct length
    if len(userID) != 4:
        print('User ID must be 4 characters long')
        sys.exit()
    # Check that outdir exists
    if not os.path.isdir(outdir):
        print('Out path does not exist, I will not create this: ' + outdir)
        sys.exit()
    # Check that evtype is one of L,R,D
    if evtype not in ['L', 'R', 'D']:
        print('Event type must be either L, R or D')
        sys.exit()

    # Generate s-file name in the format dd-hhmm-ss[L,R,D].Syyyymm
    sfilename=outdir+'/'+str(st[0].stats.starttime.day).zfill(2)+'-'+\
            str(st[0].stats.starttime.hour).zfill(2)+\
            str(st[0].stats.starttime.minute).zfill(2)+'-'+\
            str(st[0].stats.starttime.second).zfill(2)+evtype+'.S'+\
            str(st[0].stats.starttime.year)+\
            str(st[0].stats.starttime.month).zfill(2)
    # Check if sfilename exists
    if os.path.isfile(sfilename) and overwrite == 'False':
        print('Desired sfilename: ' + sfilename + ' exists, will not overwrite')
        for i in range(1,10):
            sfilename=outdir+'/'+str(st[0].stats.starttime.day).zfill(2)+'-'+\
                    str(st[0].stats.starttime.hour).zfill(2)+\
                    str(st[0].stats.starttime.minute).zfill(2)+'-'+\
                    str(st[0].stats.starttime.second+i).zfill(2)+evtype+'.S'+\
                    str(st[0].stats.starttime.year)+\
                    str(st[0].stats.starttime.month).zfill(2)
            if not os.path.isfile(sfilename):
                break
        else:
            print('Tried generating files up to 9 s in advance and found')
            print('they all exist, you need to clean your stuff up!')
            sys.exit()
        # sys.exit()
    f=open(sfilename,'w')
    # Write line 1 of s-file
    f.write(' '+str(st[0].stats.starttime.year)+' '+\
            str(st[0].stats.starttime.month).rjust(2)+\
            str(st[0].stats.starttime.day).rjust(2)+' '+\
            str(st[0].stats.starttime.hour).rjust(2)+\
            str(st[0].stats.starttime.minute).rjust(2)+' '+\
            str(st[0].stats.starttime.second).rjust(4)+' '+\
            evtype+'1'.rjust(58)+'\n')
    # Write line 2 of s-file
    f.write(' ACTION:ARG '+str(datetime.datetime.now().year)[2:4]+'-'+\
            str(datetime.datetime.now().month).zfill(2)+'-'+\
            str(datetime.datetime.now().day).zfill(2)+' '+\
            str(datetime.datetime.now().hour).zfill(2)+':'+\
            str(datetime.datetime.now().minute).zfill(2)+' OP:'+\
            userID.ljust(4)+' STATUS:'+'ID:'.rjust(18)+\
            str(st[0].stats.starttime.year)+\
            str(st[0].stats.starttime.month).zfill(2)+\
            str(st[0].stats.starttime.day).zfill(2)+\
            str(st[0].stats.starttime.hour).zfill(2)+\
            str(st[0].stats.starttime.minute).zfill(2)+\
            str(st[0].stats.starttime.second).zfill(2)+\
            'I'.rjust(6)+'\n')
    # Write line 3 of s-file
    f.write(' '+wavefile+'6'.rjust(79-len(wavefile))+'\n')
    # Write final line of s-file
    f.write(' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU'+\
            ' VELO AIN AR TRES W  DIS CAZ7\n')
    f.close()
    print('Written s-file: ' + sfilename)
    return sfilename
Example #10
    current = datetime.utcnow() # make that a variable
    year = current.year
    doy = datetime.strftime(current,"%j")
    datelist = []
    for elem in range(depth):
        datelist.append(datetime.strftime(current,dateformat))
        current = current-timedelta(days=elem+1)

    #filelist = [os.path.join(path,'CONA.HNZ.'+date+'.00.00.00') for date in datelist]
    filelist = [os.path.join(path,'CALY.HFE.'+date+'.00.00.00') for date in datelist]
    print(filelist)


for fi in filelist:
    #seedx = obsread('/home/leon/Dropbox/Daten/CALY.BFN.2016.278.00.00.00')
    seedy = obsread(fi)
    #seedz = obsread('/home/leon/Dropbox/Daten/CALY.BFZ.2016.278.00.00.00')
    #seed = seedx+seedy+seedz
    #print(seed)
    #  Do whatever you want in ObsPy
    #obj = obspy2magpy(seed,keydict={'OE.CALY..BFN': 'x','OE.CALY..BFE': 'y','OE.CALY..BFZ': 'z'})
    comp = 'y'
    obj = obspy2magpy(seedy,keydict={'OE.CALY..HFE': comp})
    #obj = obspy2magpy(seedy)
    print(obj.length(), obj.header)
    #  Do whatever you want in MagPy as mpobj is now a MagPy object
    import magpy.mpplot as mp
    #mp.plot(obj)
    mp.plotSpectrogram(obj,[comp])
    #mpobj.write('/home/myuser/mypath',format_type='PYASCII')
Example #11
         for base in matchdef.contbase:
             if base[2]==netcode:
                 contbase=base
         if not 'contbase' in locals():
             raise NameError('contbase not defined for netcode '+netcode)
         if contbase[1]=='yyyymmdd':
             daydir=str(day.year)+str(day.month).zfill(2)+\
                     str(day.day).zfill(2)
         elif contbase[1]=='Yyyyy/Rjjj.01':
             daydir='Y'+str(day.year)+'/R'+str(day.julday).zfill(3)+'.01'
         print('     Reading data from: ')
         for chan in useful_chans: # only take N horizontal components
             if glob.glob(contbase[0]+'/'+daydir+'/*'+station+'.*'+chan+'.*'):
                 print(contbase[0] + '/' + daydir + '/*' + station + '.*' + chan + '.*')
                 if not 'stream' in locals():
                     stream=obsread(contbase[0]+'/'+daydir+'/*'+station+'.*'+chan+'.*')
                 else:
                     stream+=obsread(contbase[0]+'/'+daydir+'/*'+station+'.*'+chan+'.*')
 else:
     for station in stations:
         fname='test_data/'+station+'-*-'+str(day.year)+\
                            '-'+str(day.month).zfill(2)+\
                            '-'+str(day.day).zfill(2)+'-processed.ms'
         if glob.glob(fname):
             if not 'stream' in locals():
                 stream=obsread(fname)
             else:
                 stream+=obsread(fname)
 # Process the stream
 if not Test:
     print('Processing the data')
Example #12
def from_contbase(sfile, contbase_list, lowcut, highcut, samp_rate, filt_order,\
                 length, prepick, swin, debug=0):
    """
    Function to read in picks from sfile then generate the template from the
    picks within this and the wavefiles from the continuous database of
    day-long files.  Included is a section to sanity check that the files are
    day-long and that they start at the start of the day.  You should ensure
    this is the case, otherwise this may alter your data if your data are
    day-long but the headers are incorrectly set.

    :type sfile: string
    :param sfile: sfilename must be the path to a seisan nordic type s-file \
            containing waveform and pick information, all other arguments can \
            be numbers save for swin which must be either P, S or all \
            (case-sensitive).
    :type contbase_list: List of tuple of string
    :param contbase_list: List of tuples of the form ['path', 'type', 'network']\
                    Where path is the path to the continuous database, type is\
                    the directory structure, which can be either Yyyyy/Rjjj.01,\
                    which is the standard IRIS Year, julian day structure, or,\
                    yyyymmdd which is a single directory for every day.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in\
            template defaults file
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type prepick: float
    :param prepick: Pre-pick time in seconds
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type debug: int
    :param debug: Level of debugging output, higher=more
    """
    # Perform some checks first
    import os, sys
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    # import some things
    from eqcorrscan.utils import Sfile_util
    from eqcorrscan.utils import pre_processing
    import glob
    from obspy import UTCDateTime

    # Read in the header of the sfile
    header=Sfile_util.readheader(sfile)
    day=UTCDateTime(str(header.time.year)+'-'+str(header.time.month).zfill(2)+\
                    '-'+str(header.time.day).zfill(2))

    # Read in pick info
    picks=Sfile_util.readpicks(sfile)
    print "I have found the following picks"
    pick_chans=[]
    used_picks=[]
    for pick in picks:
        if not pick.station+pick.channel in pick_chans and pick.phase in ['P','S']:
            pick_chans.append(pick.station+pick.channel)
            used_picks.append(pick)
            print pick
            for contbase in contbase_list:
                if contbase[1] == 'yyyy/mm/dd':
                    daydir=str(day.year)+'/'+str(day.month).zfill(2)+'/'+\
                            str(day.day).zfill(2)
                elif contbase[1]=='Yyyyy/Rjjj.01':
                    daydir='Y'+str(day.year)+'/R'+str(day.julday).zfill(3)+'.01'
                elif contbase[1]=='yyyymmdd':
                    daydir=str(day.year)+str(day.month).zfill(2)+str(day.day).zfill(2)
                if 'wavefiles' in locals():
                    wavefiles+=glob.glob(contbase[0]+'/'+daydir+'/*'+pick.station+\
                    '.*')
                else:
                    wavefiles=(glob.glob(contbase[0]+'/'+daydir+'/*'+pick.station+\
                    '.*'))
        elif pick.phase in ['P','S']:
            print 'Duplicate pick '+pick.station+' '+pick.channel+' '+pick.phase+\
            ' '+str(pick.time)
        elif pick.phase =='IAML':
            print 'Amplitude pick '+pick.station+' '+pick.channel+' '+pick.phase+\
            ' '+str(pick.time)
    picks=used_picks
    wavefiles=list(set(wavefiles))

    # Read in waveform file
    from obspy import read as obsread
    wavefiles.sort()
    for wavefile in wavefiles:
        print "I am going to read waveform data from: "+wavefile
        if 'st' in locals():
            st+=obsread(wavefile)
        else:
            st=obsread(wavefile)
    # Process waveform data
    st.merge(fill_value='interpolate')
    for tr in st:
        tr=pre_processing.dayproc(tr, lowcut, highcut, filt_order,\
                                samp_rate, debug, day)
    # Cut and extract the templates
    st1=_template_gen(picks, st, length, swin, prepick=prepick)
    return st1
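
# Hedged usage sketch for the function above; the s-file path and database
# root are placeholders, and the tuple follows the documented
# ['path', 'type', 'network'] form:
# template = from_contbase('04-1705-00L.S201109',
#                          contbase_list=[('/media/data/day_volumes',
#                                          'Yyyyy/Rjjj.01', 'NZ')],
#                          lowcut=2.0, highcut=8.0, samp_rate=20.0,
#                          filt_order=4, length=6.0, prepick=0.1, swin='all')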
Ejemplo n.º 13
0
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,\
               debug=0):
    """
    Function to read in picks from sfile then generate the template from the
    picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the\
    path to a seisan nordic type s-file containing waveform and pick\
    information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in\
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    """
    # Perform some checks first
    import os
    import sys
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import Sfile_util
    # Read in the header of the sfile
    wavefiles=Sfile_util.readwavename(sfile)
    pathparts=sfile.split('/')[0:-1]
    wavpath=''
    for part in pathparts:
        if part == 'REA':
            part='WAV'
        wavpath+=part+'/'
    from obspy import read as obsread
    from eqcorrscan.utils import pre_processing
    # Read in waveform file
    for wavefile in wavefiles:
        print "I am going to read waveform data from: "+wavpath+wavefile
        if 'st' in locals():
            st+=obsread(wavpath+wavefile)
        else:
            st=obsread(wavpath+wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print 'Sampling rate of data is lower than sampling rate asked for'
            print 'As this is not good practice for correlations I will not do this'
            raise ValueError("Trace: "+tr.stats.station+" sampling rate: "+\
                             str(tr.stats.sampling_rate))
    # Read in pick info
    picks=Sfile_util.readpicks(sfile)
    print "I have found the following picks"
    for pick in picks:
        print pick.station+' '+pick.channel+' '+pick.phase+' '+str(pick.time)

    # Process waveform data
    st=pre_processing.shortproc(st, lowcut, highcut, filt_order,\
                      samp_rate, debug)
    st1=_template_gen(picks, st, length, swin)
    return st1
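
# Hedged usage sketch for the function above (placeholder s-file path):
# template = from_sfile('04-1705-00L.S201109', lowcut=2.0, highcut=8.0,
#                       samp_rate=20.0, filt_order=4, length=6.0, swin='all',
#                       debug=0)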
Ejemplo n.º 14
0
print 'highcut: '+str(templatedef.highcut)+' Hz'
print 'length: '+str(templatedef.length)+' s'
print 'swin: '+templatedef.swin+'\n'
for sfile in templatedef.sfiles:
    print 'Working on: '+sfile+'\r'
    if not os.path.isfile(templatedef.saveloc+'/'+sfile+'_template.ms'):
        print sfile
        template=template_gen.from_contbase(sfile, tempdef=templatedef, \
                    matchdef=matchdef)

        print 'saving template as: '+templatedef.saveloc+'/'+\
                str(template[0].stats.starttime)+'.ms'
        template.write(templatedef.saveloc+'/'+\
                   sfile+'_template.ms',format="MSEED")
    else:
        template=obsread(templatedef.saveloc+'/'+sfile+'_template.ms')
    templates+=[template]
    # Will read in seisan s-file and generate a template from this,
    # returned name will be the template name, used for parsing to the later
    # functions

# for tfile in templatedef.tfiles:
    # # Loop through pre-existing template files
    # sys.stdout.write("\rReading in pre-existing template: "+tfile+"\r")
    # sys.stdout.flush()
    # templates.append(obsread(tfile))

templates=[obsread(tfile) for tfile in templatedef.tfiles]

print 'Read in '+str(len(templates))+' templates'
Ejemplo n.º 15
0
        tr.stats.station=station
        tr.stats.channel='S1'
        tr.stats.network='SYN'
        tr.stats.sampling_rate=samp_rate
        tr.stats.starttime=starttime
        stream+=tr
    ksta+=1
if realstr:
    # stream=obsread('scripts/brightness_test.ms')
    # stream.detrend('demean')
    # stream=obsread('/Volumes/GeoPhysics_09/users-data/chambeca/SAMBA_archive/day_volumes_S/'+\
                # 'Y2011/R247.01/*N.2011.247')
    # stream.detrend('demean')
    # stream.resample(samp_rate)
    # stream.write('scripts/brightness_test_daylong.ms',format='MSEED')
    stream=obsread('scripts/brightness_test_daylong.ms')
    stream.trim(starttime=UTCDateTime('2011-09-04 17:05:00'),\
                endtime=UTCDateTime('2011-09-04 17:15:00'))#, pad=True,\
               # fill_value=0)
    # for tr in stream:
        # if tr.stats.station=='WVZ':
            # stream.remove(tr)
stream.filter('bandpass',freqmin=4.0, freqmax=8.0)
# stream.trim(stream[0].stats.starttime+90, stream[0].stats.endtime)
stream.trim(stream[0].stats.starttime, stream[0].stats.endtime, pad=True, fill_value=0)
stream.plot(size=(800,600),equal_scale=False)

instance=0

# Cut the nodes...
cutnodes=[nodes[0]]+[nodes[116]]
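
# Hedged sketch of how one of the synthetic traces assembled above can be
# built from scratch (zeros stand in for a real synthetic waveform;
# samp_rate and starttime are assumed to be defined as in this snippet):
# import numpy as np
# from obspy import Trace
# tr = Trace(data=np.zeros(int(86400 * samp_rate), dtype=np.float32))
# tr.stats.station = 'SYN1'  # placeholder station code
# tr.stats.channel = 'S1'
# tr.stats.network = 'SYN'
# tr.stats.sampling_rate = samp_rate
# tr.stats.starttime = starttime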
Ejemplo n.º 16
0
 if baseformat=='Yyyyy/Rjjj.01':
     if glob.glob(contbase[0]+'/'+daydir+'/'+station+'.*.'+channel+\
                  '.'+str(day.year)+'.'+str(day.julday).zfill(3)):
         chan_available=True
     else:
         chan_available=False
 else:
     if glob.glob(contbase[0]+'/'+daydir+'/*'+station+'.'+channel+'.*'):
         chan_available=True
     else:
         chan_available=False
 if chan_available:
     if not 'st' in locals():
         if baseformat=='Yyyyy/Rjjj.01':
             st=obsread(contbase[0]+'/'+daydir+'/*'+station+'.*.'+\
                        channel+'.'+str(day.year)+'.'+\
                        str(day.julday).zfill(3))
         else:
             st=obsread(contbase[0]+'/'+daydir+'/*'+station+'.'+\
                        channel+'.*')
     else:
         if baseformat=='Yyyyy/Rjjj.01':
             st+=obsread(contbase[0]+'/'+daydir+'/*'+station+'.*.'+\
                         channel+'.'+str(day.year)+'.'+\
                         str(day.julday).zfill(3))
         else:
             st+=obsread(contbase[0]+'/'+daydir+'/*'+station+'.'+\
                         channel+'.*')
     actual_stations.append(station) # Add to this list only if we have the data
 else:
     print 'No data for '+stachan+' for day '+daydir+' in '\
Ejemplo n.º 17
0
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,
               prepick=0.05, debug=0, plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the \
        path to a seisan nordic type s-file containing waveform and pick \
        information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type prepick: float
    :param prepick: Length to extract prior to the pick in seconds.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template

    .. warning:: This will use whatever data is pointed to in the s-file, if \
        this is not the continuous data, we recommend using other functions. \
        Differences in processing between short files and day-long files \
        (inherent to resampling) will produce lower cross-correlations.
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    new_path_parts = []
    for part in pathparts:
        if part == 'REA':
            part = 'WAV'
        new_path_parts.append(part)
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*new_path_parts) + '/'
    # In case of absolute paths (not handled with .split() --> .join())
    if sfile[0] == '/':
        wavpath = '/' + wavpath
    # Read in waveform file
    for wavefile in wavefiles:
        print(''.join(["I am going to read waveform data from: ", wavpath,
                       wavefile]))
        if 'st' not in locals():
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked ' +
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    catalog = sfile_util.readpicks(sfile)
    # Read the list of Picks for this event
    picks = catalog[0].picks
    print("I have found the following picks")
    for pick in picks:
        print(' '.join([pick.waveform_id.station_code,
                        pick.waveform_id.channel_code, pick.phase_hint,
                        str(pick.time)]))

    # Process waveform data
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order,
                                  samp_rate, debug)
    st1 = _template_gen(picks=picks, st=st, length=length, swin=swin,
                        prepick=prepick, plot=plot)
    return st1
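
# Hedged usage sketch (placeholder s-file path); prepick and plot are the
# arguments added in this version of the function:
# template = from_sfile('04-1705-00L.S201109', lowcut=2.0, highcut=8.0,
#                       samp_rate=20.0, filt_order=4, length=6.0, swin='all',
#                       prepick=0.1, debug=0, plot=False)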
Ejemplo n.º 18
0
#            tr.stats.network='AF'
#        elif tr.stats.station in ['FRAN','POCR2','WHAT2']:
#            tr.stats.channel='SH2'
#            tr.stats.network='AF'
#    synth.write('templates/synthetics/'+str(nodes[i][0])+'_'+str(nodes[i][1])+\
#                '_'+str(nodes[i][2])+'_template.ms', format='MSEED')#,\
#                #encoding='STEIM2', reclen=512)
#    template_names.append(str(nodes[i][0])+'_'+str(nodes[i][1])+\
#                '_'+str(nodes[i][2]))
#    templates.append(synth)
#    i+=1

#del nodes, travel_time

template_names=glob.glob('templates/synthetics/*_template.ms')
templates=[obsread(tfile) for tfile in template_names]
template_names=[t.split('/')[-1].split('_template.ms')[0] \
                for t in template_names]

print 'We have '+str(len(templates))+' templates with at least five stations'
print 'Working out what stations we have'

stations=[]
for template in templates:
    # Calculate the delays for each template, do this only once so that we
    # don't have to do it heaps!
    # Check that all templates are the correct length
    for tr in template:
        if not templatedef.samp_rate*templatedef.length == tr.stats.npts:
            raise ValueError('Template for '+tr.stats.station+'.'+\
                             tr.stats.channel+' is not the correct length, recut.'+\
Ejemplo n.º 19
0
def from_contbase(sfile, contbase_list, lowcut, highcut, samp_rate, filt_order,
                  length, prepick, swin, debug=0, plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefiles from the continuous database of \
    day-long files.  Included is a section to sanity check that the files are \
    daylong and that they start at the start of the day.  You should ensure \
    this is the case otherwise this may alter your data if your data are \
    daylong but the headers are incorrectly set.

    :type sfile: string
    :param sfile: sfilename must be the path to a seisan nordic type s-file \
            containing waveform and pick information, all other arguments can \
            be numbers save for swin which must be either P, S or all \
            (case-sensitive).
    :type contbase_list: List of tuple of string
    :param contbase_list: List of tuples of the form \
        ['path', 'type', 'network'].  Where path is the path to the \
        continuous database, type is the directory structure, which can be \
        either Yyyyy/Rjjj.01, which is the standard IRIS Year, julian day \
        structure, or, yyyymmdd which is a single directory for every day.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type prepick: float
    :param prepick: Pre-pick time in seconds
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type debug: int
    :param debug: Level of debugging output, higher=more
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    # import some things
    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    import glob
    from obspy import read as obsread

    # Read in the header of the sfile
    event = sfile_util.readheader(sfile)
    day = event.origins[0].time

    # Read in pick info
    catalog = sfile_util.readpicks(sfile)
    picks = catalog[0].picks
    print("I have found the following picks")
    pick_chans = []
    used_picks = []
    for pick in picks:
        station = pick.waveform_id.station_code
        channel = pick.waveform_id.channel_code
        phase = pick.phase_hint
        pcktime = pick.time
        if station + channel not in pick_chans and phase in ['P', 'S']:
            pick_chans.append(station + channel)
            used_picks.append(pick)
            print(pick)
            for contbase in contbase_list:
                if contbase[1] == 'yyyy/mm/dd':
                    # os.path.join takes separate path components, not a list
                    daydir = os.path.join(str(day.year),
                                          str(day.month).zfill(2),
                                          str(day.day).zfill(2))
                elif contbase[1] == 'Yyyyy/Rjjj.01':
                    daydir = os.path.join('Y' + str(day.year),
                                          'R' + str(day.julday).zfill(3) +
                                          '.01')
                elif contbase[1] == 'yyyymmdd':
                    daydir = day.datetime.strftime('%Y%m%d')
                if 'wavefiles' not in locals():
                    wavefiles = glob.glob(os.path.join(contbase[0], daydir,
                                                       '*' + station + '.*'))
                else:
                    wavefiles += glob.glob(os.path.join(contbase[0], daydir,
                                                        '*' + station + '.*'))
        elif phase in ['P', 'S']:
            print(' '.join(['Duplicate pick', station, channel,
                            phase, str(pcktime)]))
        elif phase == 'IAML':
            print(' '.join(['Amplitude pick', station, channel,
                            phase, str(pcktime)]))
    picks = used_picks
    wavefiles = list(set(wavefiles))

    # Read in waveform file
    wavefiles.sort()
    for wavefile in wavefiles:
        print("I am going to read waveform data from: " + wavefile)
        if 'st' not in locals():
            st = obsread(wavefile)
        else:
            st += obsread(wavefile)
    # Process waveform data
    st.merge(fill_value='interpolate')
    for tr in st:
        tr = pre_processing.dayproc(tr, lowcut, highcut, filt_order,
                                    samp_rate, debug, day)
    # Cut and extract the templates
    st1 = _template_gen(picks, st, length, swin, prepick=prepick, plot=plot)
    return st1
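
# Hedged usage sketch (placeholder paths); this version adds plot and uses
# the documented ['path', 'type', 'network'] tuple form:
# template = from_contbase('04-1705-00L.S201109',
#                          contbase_list=[('/media/data/cont_db', 'yyyymmdd',
#                                          'NZ')],
#                          lowcut=2.0, highcut=8.0, samp_rate=20.0,
#                          filt_order=4, length=6.0, prepick=0.1, swin='all',
#                          plot=False)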
Ejemplo n.º 20
0
 startdate=UTCDateTime(startdate[0:4]+'-'+startdate[4:6]+'-'+startdate[6:8])
 enddate=UTCDateTime(enddate[0:4]+'-'+enddate[4:6]+'-'+enddate[6:8])
 kdays=int((enddate-startdate)/86400)+1
 print 'I will loop through '+str(kdays)+' days'
 import glob
 from obspy import read as obsread
 for i in xrange(kdays):
     date=startdate+86400*i
     print 'Working on day: '+str(date)
     daydir=str(date.year)+str(date.month).zfill(2)+str(date.day).zfill(2)
     infiles=glob.glob(indir+'/Y'+str(date.year)+'/R'+str(date.julday).zfill(3)+\
                      '.01/*')
     for infile in infiles:
         if debug:
             print 'Reading in '+infile
         tr=obsread(infile)
         # Fill any gaps in the data
         tr=tr.merge(fill_value='interpolate')
         # Make daylong
         tr=tr.detrend('simple')
         tr=tr.trim(starttime=date, endtime=date+86400, pad=True, fill_value=0,\
                 nearest_sample=False)
         tr=tr[0]
         if debug:
             print 'Read in file, it is '+str(len(tr.data))+' samples long'
         qual=check_daylong(tr)
         if qual:
             trenv=envsac(tr, 1.5, 5.0, 1, debug)
             if debugplot:
                 trenv.plot()
             del tr
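 # Hedged note: envsac is project-specific and not shown in this snippet; a
 # comparable Hilbert-transform envelope is available in obspy:
 # from obspy.signal.filter import envelope
 # env_data = envelope(tr.data)  # tr as prepared above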
Ejemplo n.º 21
0
def brightness(stations,
               nodes,
               lags,
               stream,
               threshold,
               thresh_type,
               template_length,
               template_saveloc,
               coherence_thresh,
               coherence_stations=['all'],
               coherence_clip=False,
               gap=2.0,
               clip_level=100,
               instance=0,
               pre_pick=0.2,
               plotvar=False,
               plotsave=True,
               cores=1,
               debug=0,
               mem_issue=False):
    """
    Calculate the brightness function for a single day.

    Written to calculate the brightness function for a single day of data,
    using moveouts from a 3D travel-time grid.

    .. Note::
        Data in stream must be all of the same length and have the same
        sampling rates, see :func:`eqcorrscan.utils.pre_processing.dayproc`

    :type stations: list
    :param stations:
        List of station names from in the form where stations[i] refers to
        nodes[i][:] and lags[i][:]
    :type nodes: list
    :param nodes:
        List of node points where nodes[i] refers to stations[i] and
        nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
        degrees, nodes[:][:][2] is depth in km.
    :type lags: numpy.ndarray
    :param lags:
        Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
        should be the delay to the nodes[i][j] for stations[i] in seconds.
    :type stream: obspy.core.stream.Stream
    :param stream: Data through which to look for detections.
    :type threshold: float
    :param threshold:
        Threshold value for detection of template within the brightness
        function.
    :type thresh_type: str
    :param thresh_type:
        Either MAD or abs where MAD is the Median Absolute Deviation and abs
        is an absolute brightness.
    :type template_length: float
    :param template_length: Length of template to extract in seconds
    :type template_saveloc: str
    :param template_saveloc: Path of where to save the templates.
    :type coherence_thresh: tuple
    :param coherence_thresh:
            Threshold for removing incoherent peaks in the network response,
            those below this will not be used as templates. Must be in the
            form of (a,b) where the coherence is given by: :math:`a-kchan/b`
            where kchan is the number of channels used to compute the
            coherence.
    :type coherence_stations: list
    :param coherence_stations:
        List of stations to use in the coherence thresholding - defaults to
        `all` which uses all the stations.
    :type coherence_clip: tuple
    :param coherence_clip:
        Start and end in seconds of data to window around, defaults to False,
        which uses all the data given.
    :type gap: float
    :param gap: Minimum inter-event time in seconds for detections.
    :type clip_level: float
    :param clip_level:
        Multiplier applied to the mean deviation of the energy as an upper
        limit, used to remove spikes (earthquakes, lightning, electrical
        spikes) from the energy stack.
    :type instance: int
    :param instance:
        Optional, used for tracking when using a distributed computing system.
    :type pre_pick: float
    :param pre_pick: Seconds before the detection time to include in template
    :type plotvar: bool
    :param plotvar: Turn plotting on or off
    :type plotsave: bool
    :param plotsave:
        Save or show plots, if `False` will try and show the plots on screen -
        as this is designed for bulk use this is set to `True` to save any
        plots rather than show them if you create them - changes the backend
        of matplotlib, so if is set to `False` you will see NO PLOTS!
    :type cores: int
    :param cores: Number of cores to use, defaults to 1.
    :type debug: int
    :param debug: Debug level from 0-5, higher is more output.
    :type mem_issue: bool
    :param mem_issue:
        Set to True to write temporary variables to disk rather than store in
        memory, slow.

    :return: list of templates as :class:`obspy.core.stream.Stream` objects
    :rtype: list
    """
    if plotsave:
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        plt.ioff()
    from eqcorrscan.utils import plotting
    from eqcorrscan.utils.debug_log import debug_print
    # Check that we actually have the correct stations
    realstations = []
    for station in stations:
        st = stream.select(station=station)
        if st:
            realstations.append(station)
    del st
    stream_copy = stream.copy()
    # Force convert to int16
    for tr in stream_copy:
        # int16 max range is +/- 32767
        if max(abs(tr.data)) > 32767:
            tr.data = 32767 * (tr.data / max(abs(tr.data)))
            # Make sure that the data aren't clipped if they are high gain
            # scale the data
        tr.data = tr.data.astype(np.int16)
    # The internal _node_loop converts energy to int16 too to conserve memory,
    # to do this it forces the maximum of a single energy trace to be 500 and
    # normalises to this level - this only works for fewer than 65 channels of
    # data
    if len(stream_copy) > 130:
        raise BrightnessError(
            'Too many streams, either re-code and cope with either more memory'
            ' usage, or less precision, or reduce data volume')
    # Loop through each node in the input
    # Linear run
    print('Computing the energy stacks')
    # Parallel run
    num_cores = cores
    if num_cores > len(nodes):
        num_cores = len(nodes)
    if num_cores > cpu_count():
        num_cores = cpu_count()
    if mem_issue and not os.path.isdir('tmp' + str(instance)):
        os.makedirs('tmp' + str(instance))
    pool = Pool(processes=num_cores)
    results = [
        pool.apply_async(
            _node_loop, (stations, ), {
                'lags': lags[:, i],
                'stream': stream,
                'i': i,
                'clip_level': clip_level,
                'mem_issue': mem_issue,
                'instance': instance
            }) for i in range(len(nodes))
    ]
    pool.close()
    if not mem_issue:
        print('Computing the cumulative network response from memory')
        energy = [p.get() for p in results]
        pool.join()
        energy.sort(key=lambda tup: tup[0])
        energy = [node[1] for node in energy]
        energy = np.concatenate(energy, axis=0)
        print(energy.shape)
    else:
        pool.join()
        del results
    # Now compute the cumulative network response and then detect possible
    # events
    if not mem_issue:
        print(energy.shape)
        indices = np.argmax(energy, axis=0)  # Indices of maximum energy
        print(indices.shape)
        cum_net_resp = np.array([np.nan] * len(indices))
        cum_net_resp[0] = energy[indices[0]][0]
        peak_nodes = [nodes[indices[0]]]
        for i in range(1, len(indices)):
            cum_net_resp[i] = energy[indices[i]][i]
            peak_nodes.append(nodes[indices[i]])
        del energy, indices
    else:
        print('Reading the temp files and computing network response')
        node_splits = int(len(nodes) // num_cores)
        print(node_splits)
        indices = []
        for i in range(num_cores):
            indices.append(
                list(np.arange(node_splits * i, node_splits * (i + 1))))
        indices[-1] += list(np.arange(node_splits * (i + 1), len(nodes)))
        # results = [_cum_net_resp(node_lis=indices[i], instance=instance)
        #            for i in range(num_cores)]
        pool = Pool(processes=num_cores)
        results = [
            pool.apply_async(_cum_net_resp, args=(indices[i], instance))
            for i in range(num_cores)
        ]
        pool.close()
        results = [p.get() for p in results]
        pool.join()
        responses = [result[0] for result in results]
        print(np.shape(responses))
        node_indices = [result[1] for result in results]
        cum_net_resp = np.array(responses)
        indices = np.argmax(cum_net_resp, axis=0)
        print(indices.shape)
        print(cum_net_resp.shape)
        cum_net_resp = np.array(
            [cum_net_resp[indices[i]][i] for i in range(len(indices))])
        peak_nodes = [
            nodes[node_indices[indices[i]][i]] for i in range(len(indices))
        ]
        del indices, node_indices
    if plotvar:
        cum_net_trace = Stream(
            Trace(data=cum_net_resp,
                  header=Stats({
                      'station': 'NR',
                      'channel': '',
                      'network': 'Z',
                      'location': '',
                      'starttime': stream[0].stats.starttime,
                      'sampling_rate': stream[0].stats.sampling_rate
                  })))
        cum_net_trace += stream.select(channel='*N')
        cum_net_trace += stream.select(channel='*1')
        cum_net_trace.sort(['network', 'station', 'channel'])

    # Find detection within this network response
    print('Finding detections in the cumulative network response')
    detections = _find_detections(cum_net_resp, peak_nodes, threshold,
                                  thresh_type, stream[0].stats.sampling_rate,
                                  realstations, gap)
    del cum_net_resp
    templates = []
    nodesout = []
    good_detections = []
    if detections:
        print('Converting detections into templates')
        # Generate a catalog of detections
        # detections_cat = Catalog()
        for j, detection in enumerate(detections):
            debug_print(
                'Converting for detection %i of %i' % (j, len(detections)), 3,
                debug)
            # Create an event for each detection
            event = Event()
            # Set up some header info for the event
            event.event_descriptions.append(EventDescription())
            event.event_descriptions[0].text = 'Brightness detection'
            event.creation_info = CreationInfo(agency_id='EQcorrscan')
            copy_of_stream = deepcopy(stream_copy)
            # Convert detections to obspy.core.event type -
            # name of detection template is the node.
            node = (detection.template_name.split('_')[0],
                    detection.template_name.split('_')[1],
                    detection.template_name.split('_')[2])
            # Look up node in nodes and find the associated lags
            index = nodes.index(
                (float(node[0]), float(node[1]), float(node[2])))
            detect_lags = lags[:, index]
            ksta = Comment(text='Number of stations=' + str(len(detect_lags)))
            event.origins.append(Origin())
            event.origins[0].comments.append(ksta)
            event.origins[0].time = copy_of_stream[0].stats.starttime +\
                detect_lags[0] + detection.detect_time
            event.origins[0].latitude = node[0]
            event.origins[0].longitude = node[1]
            event.origins[0].depth = node[2]
            for i, detect_lag in enumerate(detect_lags):
                station = stations[i]
                st = copy_of_stream.select(station=station)
                if len(st) != 0:
                    for tr in st:
                        _waveform_id = WaveformStreamID(
                            station_code=tr.stats.station,
                            channel_code=tr.stats.channel,
                            network_code=tr.stats.network)
                        event.picks.append(
                            Pick(waveform_id=_waveform_id,
                                 time=tr.stats.starttime + detect_lag +
                                 detection.detect_time + pre_pick,
                                 onset='emergent',
                                 evaluation_mode='automatic'))
            debug_print('Generating template for detection: %i' % j, 0, debug)
            template = template_gen(picks=event.picks,
                                    st=copy_of_stream,
                                    length=template_length,
                                    swin='all')
            template_name = template_saveloc + '/' +\
                str(template[0].stats.starttime) + '.ms'
            # In the interests of RAM conservation we write then read
            # Check coherency here!
            temp_coher, kchan = coherence(template, coherence_stations,
                                          coherence_clip)
            coh_thresh = float(coherence_thresh[0]) - kchan / \
                float(coherence_thresh[1])
            coherent = False
            if temp_coher > coh_thresh:
                template.write(template_name, format="MSEED")
                print('Written template as: ' + template_name)
                print('---------------------------------coherence LEVEL: ' +
                      str(temp_coher))
                coherent = True
            else:
                debug_print(
                    'Template was incoherent, coherence level: ' +
                    str(temp_coher), 0, debug)
                coherent = False
            del copy_of_stream, tr, template
            if coherent:
                templates.append(obsread(template_name))
                nodesout += [node]
                good_detections.append(detection)
            else:
                debug_print('No template for you', 0, debug)
            # detections_cat += event
    if plotvar:
        good_detections = [(cum_net_trace[-1].stats.starttime +
                            detection.detect_time).datetime
                           for detection in good_detections]
        if not plotsave:
            plotting.NR_plot(cum_net_trace[0:-1],
                             Stream(cum_net_trace[-1]),
                             detections=good_detections,
                             size=(18.5, 10),
                             title='Network response')
            # cum_net_trace.plot(size=(800,600), equal_scale=False)
        else:
            savefile = 'plots/' +\
                cum_net_trace[0].stats.starttime.datetime.strftime('%Y%m%d') +\
                '_NR_timeseries.pdf'
            plotting.NR_plot(cum_net_trace[0:-1],
                             Stream(cum_net_trace[-1]),
                             detections=good_detections,
                             size=(18.5, 10),
                             save=True,
                             savefile=savefile,
                             title='Network response')
    nodesout = list(set(nodesout))
    return templates, nodesout
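
# Worked sketch (illustrative values) of the coherence cut-off documented
# above: with coherence_thresh = (a, b) the threshold is a - kchan / b.
a, b = 0.5, 200.0  # hypothetical coherence_thresh
kchan = 20         # number of channels used to compute the coherence
coh_thresh = a - kchan / b  # 0.4: templates at or below this are discarded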
Ejemplo n.º 22
0
 daylist=sorted(list(set(datelist))) # Get unique values
 if len(daylist) > 1:
     print 'You have collected data over multiple days - slacker'
     print 'Will run over: '+str(len(daylist))+' unique days'
     #sys.exit()
 if 'prevdaypath' in locals():
     del prevdaypath # Explicitly remove the previous daypath from locals
 daycount=0
 for daypath in daylist:
     yeardir=daypath.split('/')[-2]   # Will be of the form Y2014
     daydir=daypath.split('/')[-1]    # Will be of the form R201.01
     if daycount < len(daylist)-1:
         nextdaypath=daylist[daycount+1]
     print '\n'+daydir
     if defaults.rawconv=='True':
         st=obsread(defaults.outdir+'/*/'+yeardir+'/'+daydir+'/*.m')
     else:
         st=obsread(defaults.outdir+'/'+yeardir+'/'+daydir+'/*.m')
     print 'Merging data'
     try:
         st.merge(fill_value=0)  # merge data, filling missing data with zeros -
                                 # allows for writing to multiplexed miniseed
     except:
         print 'Could not merge data for this day - same IDs but different sampling rates likely'
         samp_rate=st[0].stats.sampling_rate
         if 'st_dummy' in locals():
             del st_dummy
         for tr in st:
             if not tr.stats.sampling_rate==samp_rate:
                 print 'station: '+tr.stats.station+' samp-rate: '+\
                         str(tr.stats.sampling_rate)
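 # Hedged sketch of one way to reconcile the mismatched sampling rates before
 # retrying the merge (assumes resampling to samp_rate is acceptable):
 # for tr in st:
 #     if not tr.stats.sampling_rate == samp_rate:
 #         tr.resample(samp_rate)
 # st.merge(fill_value=0)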
Ejemplo n.º 23
0
def blanksfile(wavefile,evtype,userID,outdir,overwrite=False, evtime=False):
    """
    Function to generate an empty s-file with a populated header for a given
    waveform.

    :type wavefile: String
    :param wavefile: Wavefile to associate with this S-file, the timing of the
                    S-file will be taken from this file if evtime is not set
    :type evtype: String
    :param evtype: L,R,D
    :type userID: String
    :param userID: 4-character SEISAN USER ID
    :type outdir: String
    :param outdir: Location to write S-file
    :type overwrite: Bool
    :param overwrite: Overwrite an existing S-file, default=False
    :type evtime: UTCDateTime
    :param evtime: If given this will set the timing of the S-file

    :returns: String, S-file name
    """

    from obspy import read as obsread
    import sys,os, datetime
    if not evtime:
        try:
            st=obsread(wavefile)
        except:
            print 'Wavefile: '+wavefile+' is invalid, try again with real data.'
            sys.exit()
    else:
        starttime=evtime
    # Check that user ID is the correct length
    if len(userID) != 4:
        print 'User ID must be 4 characters long'
        sys.exit()
    # Check that outdir exists
    if not os.path.isdir(outdir):
        print 'Out path does not exist, I will not create this: '+outdir
        sys.exit()
    # Check that evtype is one of L,R,D
    if evtype not in ['L','R','D']:
        print 'Event type must be either L, R or D'
        sys.exit()

    # Generate s-file name in the format dd-hhmm-ss[L,R,D].Syyyymm
    sfilename=outdir+'/'+str(st[0].stats.starttime.day).zfill(2)+'-'+\
            str(st[0].stats.starttime.hour).zfill(2)+\
            str(st[0].stats.starttime.minute).zfill(2)+'-'+\
            str(st[0].stats.starttime.second).zfill(2)+evtype+'.S'+\
            str(st[0].stats.starttime.year)+\
            str(st[0].stats.starttime.month).zfill(2)
    # Check if sfilename exists
    if os.path.isfile(sfilename) and not overwrite:
        print 'Desired sfilename: '+sfilename+' exists, will not overwrite'
        for i in range(1,10):
            sfilename=outdir+'/'+str(st[0].stats.starttime.day).zfill(2)+'-'+\
                    str(st[0].stats.starttime.hour).zfill(2)+\
                    str(st[0].stats.starttime.minute).zfill(2)+'-'+\
                    str(st[0].stats.starttime.second+i).zfill(2)+evtype+'.S'+\
                    str(st[0].stats.starttime.year)+\
                    str(st[0].stats.starttime.month).zfill(2)
            if not os.path.isfile(sfilename):
                break
        else:
            print 'Tried generating files up to 10 s in advance and found they'
            print 'all exist, you need to clean your stuff up!'
            sys.exit()
        # sys.exit()
    f=open(sfilename,'w')
    # Write line 1 of s-file
    f.write(' '+str(st[0].stats.starttime.year)+' '+\
            str(st[0].stats.starttime.month).rjust(2)+\
            str(st[0].stats.starttime.day).rjust(2)+' '+\
            str(st[0].stats.starttime.hour).rjust(2)+\
            str(st[0].stats.starttime.minute).rjust(2)+' '+\
            str(st[0].stats.starttime.second).rjust(4)+' '+\
            evtype+'1'.rjust(58)+'\n')
    # Write line 2 of s-file
    f.write(' ACTION:ARG '+str(datetime.datetime.now().year)[2:4]+'-'+\
            str(datetime.datetime.now().month).zfill(2)+'-'+\
            str(datetime.datetime.now().day).zfill(2)+' '+\
            str(datetime.datetime.now().hour).zfill(2)+':'+\
            str(datetime.datetime.now().minute).zfill(2)+' OP:'+\
            userID.ljust(4)+' STATUS:'+'ID:'.rjust(18)+\
            str(st[0].stats.starttime.year)+\
            str(st[0].stats.starttime.month).zfill(2)+\
            str(st[0].stats.starttime.day).zfill(2)+\
            str(st[0].stats.starttime.hour).zfill(2)+\
            str(st[0].stats.starttime.minute).zfill(2)+\
            str(st[0].stats.starttime.second).zfill(2)+\
            'I'.rjust(6)+'\n')
    # Write line 3 of s-file
    f.write(' '+wavefile+'6'.rjust(79-len(wavefile))+'\n')
    # Write final line of s-file
    f.write(' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU'+\
            ' VELO AIN AR TRES W  DIS CAZ7\n')
    f.close()
    print 'Written s-file: '+sfilename
    return sfilename
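
# Hedged usage sketch for blanksfile (placeholder wavefile and outdir; the
# user ID must be exactly four characters):
# sfilename = blanksfile('2011-09-04-1705-00.NSN_003', evtype='L',
#                        userID='EQCS', outdir='REA/TEST_/2011/09',
#                        overwrite=False)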
Ejemplo n.º 24
0
def from_contbase(sfile, contbase_list, lowcut, highcut, samp_rate, filt_order,
                  length, prepick, swin, debug=0, plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefiles from the continuous database of \
    day-long files.  Included is a section to sanity check that the files are \
    daylong and that they start at the start of the day.  You should ensure \
    this is the case otherwise this may alter your data if your data are \
    daylong but the headers are incorrectly set.

    :type sfile: string
    :param sfile: sfilename must be the path to a seisan nordic type s-file \
            containing waveform and pick information, all other arguments can \
            be numbers save for swin which must be either P, S or all \
            (case-sensitive).
    :type contbase_list: List of tuple of string
    :param contbase_list: List of tuples of the form \
        ['path', 'type', 'network'].  Where path is the path to the \
        continuous database, type is the directory structure, which can be \
        either Yyyyy/Rjjj.01, which is the standard IRIS Year, julian day \
        structure, or, yyyymmdd which is a single directory for every day.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type prepick: float
    :param prepick: Pre-pick time in seconds
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type debug: int
    :param debug: Level of debugging output, higher=more
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    # import some things
    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    import glob
    from obspy import read as obsread

    # Read in the header of the sfile
    event = sfile_util.readheader(sfile)
    day = event.origins[0].time

    # Read in pick info
    catalog = sfile_util.readpicks(sfile)
    picks = catalog[0].picks
    print("I have found the following picks")
    pick_chans = []
    used_picks = []
    for pick in picks:
        station = pick.waveform_id.station_code
        channel = pick.waveform_id.channel_code
        phase = pick.phase_hint
        pcktime = pick.time
        if station + channel not in pick_chans and phase in ['P', 'S']:
            pick_chans.append(station + channel)
            used_picks.append(pick)
            print(pick)
            for contbase in contbase_list:
                if contbase[1] == 'yyyy/mm/dd':
                    # os.path.join takes separate path components, not a list
                    daydir = os.path.join(str(day.year),
                                          str(day.month).zfill(2),
                                          str(day.day).zfill(2))
                elif contbase[1] == 'Yyyyy/Rjjj.01':
                    daydir = os.path.join('Y' + str(day.year),
                                          'R' + str(day.julday).zfill(3) +
                                          '.01')
                elif contbase[1] == 'yyyymmdd':
                    daydir = day.datetime.strftime('%Y%m%d')
                if 'wavefiles' not in locals():
                    wavefiles = glob.glob(os.path.join(contbase[0], daydir,
                                                       '*' + station + '.*'))
                else:
                    wavefiles += glob.glob(os.path.join(contbase[0], daydir,
                                                        '*' + station + '.*'))
        elif phase in ['P', 'S']:
            print(' '.join(['Duplicate pick', station, channel,
                            phase, str(pcktime)]))
        elif phase == 'IAML':
            print(' '.join(['Amplitude pick', station, channel,
                            phase, str(pcktime)]))
    picks = used_picks
    wavefiles = list(set(wavefiles))

    # Read in waveform file
    wavefiles.sort()
    for wavefile in wavefiles:
        print("I am going to read waveform data from: " + wavefile)
        if 'st' not in locals():
            st = obsread(wavefile)
        else:
            st += obsread(wavefile)
    # Process waveform data
    st.merge(fill_value='interpolate')
    for tr in st:
        tr = pre_processing.dayproc(tr, lowcut, highcut, filt_order,
                                    samp_rate, debug, day)
    # Cut and extract the templates
    st1 = _template_gen(picks, st, length, swin, prepick=prepick, plot=plot,
                        debug=debug)
    return st1
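
# Note on the os.path.join fix above: the function takes separate string
# arguments rather than a list, e.g.:
import os
_glob_pat = os.path.join('cont_db', '20110904', '*FOZ.*')
# _glob_pat == 'cont_db/20110904/*FOZ.*' on POSIX systems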
Ejemplo n.º 25
0
print 'highcut: ' + str(templatedef.highcut) + ' Hz'
print 'length: ' + str(templatedef.length) + ' s'
print 'swin: ' + templatedef.swin + '\n'
for sfile in templatedef.sfiles:
    print 'Working on: ' + sfile + '\r'
    if not os.path.isfile(templatedef.saveloc + '/' + sfile + '_template.ms'):
        template=template_gen.from_contbase(templatedef.sfilebase+'/'+sfile,\
                                            tempdef=templatedef,\
                                            matchdef=matchdef)

        print 'saving template as: '+templatedef.saveloc+'/'+\
                str(template[0].stats.starttime)+'.ms'
        template.write(templatedef.saveloc+'/'+\
                   sfile+'_template.ms',format="MSEED")
    else:
        template = obsread(templatedef.saveloc + '/' + sfile + '_template.ms')
    templates += [template]
    # Will read in seisan s-file and generate a template from this,
    # returned name will be the template name, used for parsing to the later
    # functions

# for tfile in templatedef.tfiles:
# # Loop through pre-existing template files
# sys.stdout.write("\rReading in pre-existing template: "+tfile+"\r")
# sys.stdout.flush()
# templates.append(obsread(tfile))

templates = [obsread(tfile) for tfile in templatedef.tfiles]

print 'Read in ' + str(len(templates)) + ' templates'
Ejemplo n.º 26
0
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,
               prepick=0.05, debug=0, plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the \
        path to a seisan nordic type s-file containing waveform and pick \
        information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
            template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
            template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
            defaults file.
    :type prepick: float
    :param prepick: Length to extract prior to the pick in seconds.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template

    .. warning:: This will use whatever data is pointed to in the s-file, if \
        this is not the continuous data, we recommend using other functions. \
        Differences in processing between short files and day-long files \
        (inherent to resampling) will produce lower cross-correlations.
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = sfile_util.readwavename(sfile)
    pathparts = sfile.split('/')[0:-1]
    new_path_parts = []
    for part in pathparts:
        if part == 'REA':
            part = 'WAV'
        new_path_parts.append(part)
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*new_path_parts) + '/'
    # In case of absolute paths (not handled with .split() --> .join())
    if sfile[0] == '/':
        wavpath = '/' + wavpath
    # Read in waveform file
    for wavefile in wavefiles:
        print(''.join(["I am going to read waveform data from: ", wavpath,
                       wavefile]))
        if 'st' not in locals():
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked ' +
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    catalog = sfile_util.readpicks(sfile)
    # Read the list of Picks for this event
    picks = catalog[0].picks
    print("I have found the following picks")
    for pick in picks:
        print(' '.join([pick.waveform_id.station_code,
                        pick.waveform_id.channel_code, pick.phase_hint,
                        str(pick.time)]))

    # Process waveform data
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order,
                                  samp_rate, debug)
    st1 = _template_gen(picks=picks, st=st, length=length, swin=swin,
                        prepick=prepick, plot=plot, debug=debug)
    return st1
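
# Hedged sketch of the REA -> WAV path substitution performed above, with a
# placeholder absolute s-file path:
import os
_sfile = '/seismo/REA/TEST_/2011/09/04-1705-00L.S201109'
_parts = ['WAV' if p == 'REA' else p for p in _sfile.split('/')[0:-1]]
_wavpath = os.path.join(*_parts) + '/'
if _sfile[0] == '/':
    _wavpath = '/' + _wavpath
# _wavpath == '/seismo/WAV/TEST_/2011/09/'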
Ejemplo n.º 27
0
 yeardir=daypath.split('/')[-2]   # Will be of the form Y2014
 daydir=daypath.split('/')[-1]    # Will be of the form R201.01
 day=UTCDateTime(yeardir[1:5]+daydir[1:4])
 if daycount < len(daylist)-1:
     nextdaypath=daylist[daycount+1]
 print '\n'+daydir
 if 'st' in locals():
     del st
 if defaults.rawconv or defaults.converted:
     rawfiles=glob.glob(defaults.outdir+'/*/'+yeardir+'/'+daydir+'/*.m')
 else:
     rawfiles=glob.glob(defaults.outdir+'/'+yeardir+'/'+daydir+'/*.m')
 for rawfile in rawfiles:
     if not 'st' in locals():
         try:
             st=obsread(rawfile)
         except:
             print rawfile+' is corrupt'
     else:
         try:
             st+=obsread(rawfile)
         except:
             print rawfile+' is corrupt'
 print 'Merging data'
 try:
     # for tr in st:
         # tr = tr.detrend('simple')
     st.merge()  # merge data, filling missing data with zeros -
                             # allows for writing to multiplexed miniseed
 except:
     print 'Could not merge data for this day - same IDs but different sampling rates likely'
Ejemplo n.º 28
0
    def get_inventory_from_df(self, df, client=None, data=True):
        """
        Get an :class:`obspy.Inventory` object from a
        :class:`pandas.DataFrame`

        :param df: DataFrame with columns

            - 'network'   --> FDSN Network code
            - 'station'   --> FDSN Station code
            - 'location'  --> FDSN Location code
            - 'channel'   --> FDSN Channel code
            - 'start'     --> Start time YYYY-MM-DDThh:mm:ss
            - 'end'       --> End time YYYY-MM-DDThh:mm:ss

        :type df: :class:`pandas.DataFrame`
        :param client: FDSN client
        :type client: string
        :param data: True if you want data, False if you want just metadata;
            defaults to True
        :type data: boolean, optional
        :return: An inventory of metadata requested and data
        :rtype: :class:`obspy.Inventory` and :class:`obspy.Stream`

        .. seealso:: https://docs.obspy.org/packages/obspy.clients.fdsn.html#id1

        .. note:: If any of the column values are blank, then any value will
            be searched for.  For example, if you leave 'station' blank, any
            station within the given start and end time will be returned.

        """
        if client is not None:
            self.client = client

        df = self._validate_dataframe(df)

        # get the metadata from an obspy client
        client = fdsn.Client(self.client)

        # create an empty stream to add to (obsread() with no arguments
        # returns the obspy example stream, which clear() then empties)
        streams = obsread()
        streams.clear()

        inv = Inventory(networks=[], source="MTH5")

        # sort the values to be logically ordered (sort_values returns a new
        # DataFrame rather than sorting in place, so reassign)
        df = df.sort_values(self.column_names[:-1])

        used_network = dict()
        used_station = dict()
        for row in df.itertuples():
            # First for loop builds out networks and stations
            if row.network not in used_network:
                net_inv = client.get_stations(
                    row.start, row.end, network=row.network, level="network"
                )
                returned_network = net_inv.networks[0]
                used_network[row.network] = [row.start]
            elif used_network.get(
                row.network
            ) is not None and row.start not in used_network.get(row.network):
                net_inv = client.get_stations(
                    row.start, row.end, network=row.network, level="network"
                )
                returned_network = net_inv.networks[0]
                used_network[row.network].append(row.start)
            else:
                continue
            for st_row in df.itertuples():
                if row.network != st_row.network:
                    continue
                else:
                    if st_row.station not in used_station:
                        sta_inv = client.get_stations(
                            st_row.start,
                            st_row.end,
                            network=row.network,
                            station=st_row.station,
                            level="station",
                        )
                        returned_sta = sta_inv.networks[0].stations[0]
                        used_station[st_row.station] = [st_row.start]
                    elif used_station.get(
                        st_row.station
                    ) is not None and st_row.start not in used_station.get(
                        st_row.station
                    ):
                        # Checks for epoch
                        sta_inv = client.get_stations(
                            st_row.start,
                            st_row.end,
                            network=st_row.network,
                            station=st_row.station,
                            level="station",
                        )
                        returned_sta = sta_inv.networks[0].stations[0]
                        used_station[st_row.station].append(st_row.start)
                    else:
                        continue
                for ch_row in df.itertuples():
                    if (
                        ch_row.network == row.network
                        and st_row.station == ch_row.station
                        and ch_row.start == st_row.start
                    ):
                        cha_inv = client.get_stations(
                            ch_row.start,
                            ch_row.end,
                            network=ch_row.network,
                            station=ch_row.station,
                            loc=ch_row.location,
                            channel=ch_row.channel,
                            level="response",
                        )
                        returned_chan = cha_inv.networks[0].stations[0].channels[0]
                        returned_sta.channels.append(returned_chan)

                        # -----------------------------
                        # get data if desired
                        if data:
                            streams = (
                                client.get_waveforms(
                                    ch_row.network,
                                    ch_row.station,
                                    ch_row.location,
                                    ch_row.channel,
                                    UTCDateTime(ch_row.start),
                                    UTCDateTime(ch_row.end),
                                )
                                + streams
                            )
                    else:
                        continue

                returned_network.stations.append(returned_sta)
            inv.networks.append(returned_network)

        return inv, streams
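A minimal usage sketch for the method above. The surrounding class is not shown in this snippet, so RequestClient below is a placeholder name; only the DataFrame columns and the call signature come from the code above:

import pandas as pd

request_df = pd.DataFrame({
    'network':  ['XX'],
    'station':  ['TEST'],
    'location': [''],
    'channel':  ['LHZ'],
    'start':    ['2020-01-01T00:00:00'],
    'end':      ['2020-01-02T00:00:00'],
})

# RequestClient is a placeholder for whatever class defines the method above
requester = RequestClient()
inventory, streams = requester.get_inventory_from_df(
    request_df, client='IRIS', data=False)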
Example #29
def blanksfile(wavefile,
               evtype,
               userID,
               outdir,
               overwrite=False,
               evtime=False):
    """
    Function to generate an empty s-file with a populated header for a given
    waveform.

    :type wavefile: String
    :param wavefile: Wavefile to associate with this S-file, the timing of the
                    S-file will be taken from this file if evtime is not set
    :type evtype: String
    :param evtype: L,R,D
    :type userID: String
    :param userID: 4-character SEISAN USER ID
    :type outdir: String
    :param outdir: Location to write S-file
    :type overwrite: Bool
    :param overwrite: Overwrite an existing S-file, default=False
    :type evtime: UTCDateTime
    :param evtime: If given this will set the timing of the S-file

    :returns: String, S-file name
    """

    from obspy import read as obsread
    import sys
    import os
    import datetime

    if not evtime:
        try:
            st = obsread(wavefile)
            evtime = st[0].stats.starttime
        except Exception:
            print('Wavefile: ' + wavefile + ' is invalid, try again with '
                  'real data.')
            sys.exit()
    # Check that the user ID is the correct length
    if len(userID) != 4:
        print('User ID must be 4 characters long')
        sys.exit()
    # Check that outdir exists
    if not os.path.isdir(outdir):
        print('Out path does not exist, I will not create this: ' + outdir)
        sys.exit()
    # Check that evtype is one of L, R, D
    if evtype not in ['L', 'R', 'D']:
        print('Event type must be either L, R or D')
        sys.exit()

    # Generate s-file name in the format dd-hhmm-ss[L,R,D].Syyyymm
    sfilename = outdir + '/' + str(evtime.day).zfill(2) + '-' +\
        str(evtime.hour).zfill(2) +\
        str(evtime.minute).zfill(2) + '-' +\
        str(evtime.second).zfill(2) + evtype + '.S' +\
        str(evtime.year) +\
        str(evtime.month).zfill(2)
    # Check if sfilename exists
    if os.path.isfile(sfilename) and not overwrite:
        print('Desired sfilename: ' + sfilename + ' exists, will not '
              'overwrite')
        for i in range(1, 10):
            sfilename = outdir + '/' + str(evtime.day).zfill(2) + '-' +\
                str(evtime.hour).zfill(2) +\
                str(evtime.minute).zfill(2) + '-' +\
                str(evtime.second + i).zfill(2) + evtype + '.S' +\
                str(evtime.year) +\
                str(evtime.month).zfill(2)
            if not os.path.isfile(sfilename):
                break
        else:
            print('Tried generating files up to 10s in advance and found '
                  'they all exist, you need to clean your stuff up!')
            sys.exit()
    f = open(sfilename, 'w')
    # Write line 1 of s-file
    f.write(' '+str(evtime.year)+' '+\
            str(evtime.month).rjust(2)+\
            str(evtime.day).rjust(2)+' '+\
            str(evtime.hour).rjust(2)+\
            str(evtime.minute).rjust(2)+' '+\
            str(float(evtime.second)).rjust(4)+' '+\
            evtype+'1'.rjust(58)+'\n')
    # Write line 2 of s-file
    f.write(' ACTION:ARG '+str(datetime.datetime.now().year)[2:4]+'-'+\
            str(datetime.datetime.now().month).zfill(2)+'-'+\
            str(datetime.datetime.now().day).zfill(2)+' '+\
            str(datetime.datetime.now().hour).zfill(2)+':'+\
            str(datetime.datetime.now().minute).zfill(2)+' OP:'+\
            userID.ljust(4)+' STATUS:'+'ID:'.rjust(18)+\
            str(evtime.year)+\
            str(evtime.month).zfill(2)+\
            str(evtime.day).zfill(2)+\
            str(evtime.hour).zfill(2)+\
            str(evtime.minute).zfill(2)+\
            str(evtime.second).zfill(2)+\
            'I'.rjust(6)+'\n')
    # Write line 3 of s-file
    f.write(' ' + wavefile + '6'.rjust(79 - len(wavefile)) + '\n')
    # Write final line of s-file
    f.write(' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU'+\
            ' VELO AIN AR TRES W  DIS CAZ7\n')
    f.close()
    print('Written s-file: ' + sfilename)
    return sfilename
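The dd-hhmm-ss[L,R,D].Syyyymm name built by hand above can be checked against a strftime one-liner; a sketch, using obspy's UTCDateTime.strftime with illustrative values:

from obspy import UTCDateTime

evtime = UTCDateTime(2013, 9, 1, 4, 10, 35)
evtype = 'L'
sfilename = evtime.strftime('%d-%H%M-%S') + evtype + '.S' + \
    evtime.strftime('%Y%m')
print(sfilename)  # 01-0410-35L.S201309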
Example #30
def blanksfile(wavefile, evtype, userID, outdir, overwrite=False,
               evtime=False):
    """
    Generate an empty s-file with a populated header for a given waveform.

    :type wavefile: str
    :param wavefile: Wavefile to associate with this S-file, the timing of \
        the S-file will be taken from this file if evtime is not set.
    :type evtype: str
    :param evtype: Event type letter code, e.g. L, R, D
    :type userID: str
    :param userID: 4-character SEISAN USER ID
    :type outdir: str
    :param outdir: Location to write S-file
    :type overwrite: bool
    :param overwrite: Overwrite an existing S-file, default=False
    :type evtime: obspy.core.utcdatetime.UTCDateTime
    :param evtime: If given this will set the timing of the S-file

    :returns: str, S-file name

    >>> from eqcorrscan.utils.sfile_util import readwavename
    >>> import os
    >>> wavefile = os.path.join('eqcorrscan', 'tests', 'test_data', 'WAV',
    ...                         'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
    >>> sfile = blanksfile(wavefile, 'L', 'TEST',
    ...                    '.', overwrite=True)
    Written s-file: ./01-0410-35L.S201309
    >>> readwavename(sfile)
    ['2013-09-01-0410-35.DFDPC_024_00']
    """

    from obspy import read as obsread
    import sys
    import os
    import datetime

    if not evtime:
        try:
            st = obsread(wavefile)
            evtime = st[0].stats.starttime
        except Exception:
            raise IOError('Wavefile: ' + wavefile +
                          ' is invalid, try again with real data.')
    # Check that user ID is the correct length
    if len(userID) != 4:
        raise IOError('User ID must be 4 characters long')
    # Check that outdir exists
    if not os.path.isdir(outdir):
        raise IOError('Out path does not exist, I will not create this: ' +
                      outdir)
    # Check that evtype is one of L,R,D
    if evtype not in ['L', 'R', 'D']:
        raise IOError('Event type must be either L, R or D')

    # Generate s-file name in the format dd-hhmm-ss[L,R,D].Syyyymm
    sfile = outdir + '/' + str(evtime.day).zfill(2) + '-' +\
        str(evtime.hour).zfill(2) +\
        str(evtime.minute).zfill(2) + '-' +\
        str(evtime.second).zfill(2) + evtype + '.S' +\
        str(evtime.year) +\
        str(evtime.month).zfill(2)
    # Check if sfile exists
    if os.path.isfile(sfile) and not overwrite:
        print('Desired sfile: ' + sfile + ' exists, will not overwrite')
        for i in range(1, 10):
            sfile = outdir + '/' + str(evtime.day).zfill(2) + '-' +\
                str(evtime.hour).zfill(2) +\
                str(evtime.minute).zfill(2) + '-' +\
                str(evtime.second + i).zfill(2) + evtype + '.S' +\
                str(evtime.year) +\
                str(evtime.month).zfill(2)
            if not os.path.isfile(sfile):
                break
        else:
            msg = 'Tried generating files up to 10s in advance and found ' +\
                'they all exist, you need to clean your stuff up!'
            raise IOError(msg)
    f = open(sfile, 'w')
    # Write line 1 of s-file
    f.write(str(' ' + str(evtime.year) + ' ' +
                str(evtime.month).rjust(2) +
                str(evtime.day).rjust(2) + ' ' +
                str(evtime.hour).rjust(2) +
                str(evtime.minute).rjust(2) + ' ' +
                str(float(evtime.second)).rjust(4) + ' ' +
                evtype + '1'.rjust(58) + '\n'))
    # Write line 2 of s-file
    f.write(str(' ACTION:ARG ' + str(datetime.datetime.now().year)[2:4] + '-' +
                str(datetime.datetime.now().month).zfill(2) + '-' +
                str(datetime.datetime.now().day).zfill(2) + ' ' +
                str(datetime.datetime.now().hour).zfill(2) + ':' +
                str(datetime.datetime.now().minute).zfill(2) + ' OP:' +
                userID.ljust(4) + ' STATUS:' + 'ID:'.rjust(18) +
                str(evtime.year) +
                str(evtime.month).zfill(2) +
                str(evtime.day).zfill(2) +
                str(evtime.hour).zfill(2) +
                str(evtime.minute).zfill(2) +
                str(evtime.second).zfill(2) +
                'I'.rjust(6) + '\n'))
    # Write line 3 of s-file
    write_wavfile = wavefile.split(os.sep)[-1]
    f.write(str(' ' + write_wavfile + '6'.rjust(79 - len(write_wavfile)) +
                '\n'))
    # Write final line of s-file
    f.write(str(' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU' +
                ' VELO AIN AR TRES W  DIS CAZ7\n'))
    f.close()
    print('Written s-file: ' + sfile)
    return sfile
Example #31
def brightness(stations,
               nodes,
               lags,
               stream,
               threshold,
               thresh_type,
               template_length,
               template_saveloc,
               coherence_thresh,
               coherence_stations=['all'],
               coherence_clip=False,
               gap=2.0,
               clip_level=100,
               instance=0,
               pre_pick=0.2,
               plotsave=True,
               cores=1):
    r"""Function to calculate the brightness function in terms of energy for \
    a day of data over the entire network for a given grid of nodes.

    Note data in stream must be all of the same length and have the same
    sampling rates.

    :type stations: list
    :param stations: List of station names, in the form where stations[i] \
        refers to nodes[i][:] and lags[i][:]
    :type nodes: list, tuple
    :param nodes: List of node points where nodes[i] refers to stations[i] \
        and nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is \
        longitude in degrees, nodes[:][:][2] is depth in km.
    :type lags: numpy.ndarray
    :param lags: Array of arrays where lags[i][:] refers to stations[i]. \
        lags[i][j] should be the delay to the nodes[i][j] for stations[i] in \
        seconds.
    :type stream: :class: `obspy.Stream`
    :param stream: Data through which to look for detections.
    :type threshold: float
    :param threshold: Threshold value for detection of template within the \
        brightness function
    :type thresh_type: str
    :param thresh_type: Either MAD or abs, where MAD is the Median Absolute \
        Deviation and abs is an absolute brightness.
    :type template_length: float
    :param template_length: Length of template to extract in seconds
    :type template_saveloc: str
    :param template_saveloc: Path of where to save the templates.
    :type coherence_thresh: tuple of floats
    :param coherence_thresh: Threshold for removing incoherent peaks in the \
            network response, those below this will not be used as templates. \
            Must be in the form of (a,b) where the coherence is given by: \
            a-kchan/b where kchan is the number of channels used to compute \
            the coherence
    :type coherence_stations: list
    :param coherence_stations: List of stations to use in the coherence \
            thresholding - defaults to 'all' which uses all the stations.
    :type coherence_clip: tuple
    :param coherence_clip: Start and end in seconds of data to window \
            around, defaults to False, which uses all the data given.
    :type pre_pick: float
    :param pre_pick: Seconds before the detection time to include in template
    :type plotsave: bool
    :param plotsave: Save or show plots, if False will try and show the plots \
            on screen - as this is designed for bulk use this is set to \
            True to save any plots rather than show them if you create \
            them - changes the backend of matplotlib, so if is set to \
            False you will see NO PLOTS!
    :type cores: int
    :param cores: Number of cores to use, defaults to 1.
    :type clip_level: float
    :param clip_level: Multiplier applied to the mean deviation of the energy \
                    as an upper limit, used to remove spikes (earthquakes, \
                    lightning, electrical spikes) from the energy stack.
    :type gap: float
    :param gap: Minimum inter-event time in seconds for detections

    :return: list of templates as :class: `obspy.Stream` objects
    """
    from eqcorrscan.core.template_gen import _template_gen
    if plotsave:
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        plt.ioff()
    # from joblib import Parallel, delayed
    from multiprocessing import Pool, cpu_count
    from copy import deepcopy
    from obspy import read as obsread
    from obspy.core.event import Catalog, Event, Pick, WaveformStreamID, Origin
    from obspy.core.event import EventDescription, CreationInfo, Comment
    import obspy  # obspy.Stream is a class, so import the package itself
    import numpy as np  # np is used below but was not imported
    import matplotlib.pyplot as plt
    from eqcorrscan.utils import EQcorrscan_plotting as plotting
    # Check that we actually have the correct stations
    realstations = []
    for station in stations:
        st = stream.select(station=station)
        if st:
            realstations.append(station)  # += would add each character
    del st
    stream_copy = stream.copy()
    # Force convert to int16
    for tr in stream_copy:
        # int16 max range is +/- 32767
        if max(abs(tr.data)) > 32767:
            # Scale high-gain data down so the int16 conversion does not
            # clip it
            tr.data = 32767 * (tr.data / max(abs(tr.data)))
        tr.data = tr.data.astype(np.int16)
    # The internal _node_loop converts energy to int16 too to conserve memory,
    # to do this it forces the maximum of a single energy trace to be 500 and
    # normalises to this level - this only works for fewer than 65 channels of
    # data
    if len(stream_copy) > 130:
        raise OverflowError('Too many streams, either re-code and cope with '
                            'more memory usage, or less precision, or '
                            'reduce data volume')
    detections = []
    detect_lags = []
    parallel = True
    plotvar = True
    mem_issue = False
    # Loop through each node in the input
    # Linear run
    print('Computing the energy stacks')
    if not parallel:
        for i in range(0, len(nodes)):
            print(i)
            if not mem_issue:
                j, a = _node_loop(stations, lags[:, i], stream, plot=True)
                if 'energy' not in locals():
                    energy = a
                else:
                    energy = np.concatenate((energy, a), axis=0)
                print('energy: ' + str(np.shape(energy)))
            else:
                j, filename = _node_loop(stations, lags[:, i], stream, i,
                                         mem_issue)
        energy = np.array(energy)
        print(np.shape(energy))
    else:
        # Parallel run
        num_cores = cores
        if num_cores > len(nodes):
            num_cores = len(nodes)
        if num_cores > cpu_count():
            num_cores = cpu_count()
        pool = Pool(processes=num_cores)
        results = [
            pool.apply_async(_node_loop,
                             args=(stations, lags[:, i], stream, i, clip_level,
                                   mem_issue, instance))
            for i in range(len(nodes))
        ]
        pool.close()
        if not mem_issue:
            print('Computing the cumulative network response from memory')
            energy = [p.get() for p in results]
            pool.join()
            energy.sort(key=lambda tup: tup[0])
            energy = [node[1] for node in energy]
            energy = np.concatenate(energy, axis=0)
            print(energy.shape)
        else:
            pool.join()
    # Now compute the cumulative network response and then detect possible
    # events
    if not mem_issue:
        print(energy.shape)
        indeces = np.argmax(energy, axis=0)  # Indices of maximum energy
        print(indeces.shape)
        cum_net_resp = np.array([np.nan] * len(indeces))
        cum_net_resp[0] = energy[indeces[0]][0]
        peak_nodes = [nodes[indeces[0]]]
        for i in range(1, len(indeces)):
            cum_net_resp[i] = energy[indeces[i]][i]
            peak_nodes.append(nodes[indeces[i]])
        del energy, indeces
    else:
        print('Reading the temp files and computing network response')
        node_splits = len(nodes) // num_cores
        indeces = [range(node_splits)]
        for i in range(1, num_cores - 1):
            indeces.append(range(node_splits * i, node_splits * (i + 1)))
        indeces.append(range(node_splits * (i + 1), len(nodes)))
        pool = Pool(processes=num_cores)
        results = [
            pool.apply_async(_cum_net_resp, args=(indeces[i], instance))
            for i in range(num_cores)
        ]
        pool.close()
        results = [p.get() for p in results]
        pool.join()
        responses = [result[0] for result in results]
        print(np.shape(responses))
        node_indeces = [result[1] for result in results]
        cum_net_resp = np.array(responses)
        indeces = np.argmax(cum_net_resp, axis=0)
        print(indeces.shape)
        print(cum_net_resp.shape)
        cum_net_resp = np.array(
            [cum_net_resp[indeces[i]][i] for i in range(len(indeces))])
        peak_nodes = [
            nodes[node_indeces[indeces[i]][i]] for i in range(len(indeces))
        ]
        del indeces, node_indeces
    if plotvar:
        cum_net_trace = deepcopy(stream[0])
        cum_net_trace.data = cum_net_resp
        cum_net_trace.stats.station = 'NR'
        cum_net_trace.stats.channel = ''
        cum_net_trace.stats.network = 'Z'
        cum_net_trace.stats.location = ''
        cum_net_trace.stats.starttime = stream[0].stats.starttime
        cum_net_trace = obspy.Stream(cum_net_trace)
        cum_net_trace += stream.select(channel='*N')
        cum_net_trace += stream.select(channel='*1')
        cum_net_trace.sort(['network', 'station', 'channel'])
        # np.save('cum_net_resp.npy',cum_net_resp)
        #     cum_net_trace.plot(size=(800,600), equal_scale=False,\
        #                        outfile='NR_timeseries.eps')

    # Find detection within this network response
    print('Finding detections in the cumulative network response')
    detections = _find_detections(cum_net_resp, peak_nodes, threshold,
                                  thresh_type, stream[0].stats.sampling_rate,
                                  realstations, gap)
    del cum_net_resp
    templates = []
    nodesout = []
    good_detections = []
    if detections:
        print('Converting detections in to templates')
        # Generate a catalog of detections
        detections_cat = Catalog()
        for j, detection in enumerate(detections):
            print('Converting for detection ' + str(j) + ' of ' +
                  str(len(detections)))
            # Create an event for each detection
            event = Event()
            # Set up some header info for the event
            event.event_descriptions.append(EventDescription())
            event.event_descriptions[0].text = 'Brightness detection'
            event.creation_info = CreationInfo(agency_id='EQcorrscan')
            copy_of_stream = deepcopy(stream_copy)
            # Convert detections to obspy.core.event type -
            # name of detection template is the node.
            node = (detection.template_name.split('_')[0],
                    detection.template_name.split('_')[1],
                    detection.template_name.split('_')[2])
            print(node)
            # Look up node in nodes and find the associated lags
            index = nodes.index(node)
            detect_lags = lags[:, index]
            ksta = Comment(text='Number of stations=' + str(len(detect_lags)))
            event.origins.append(Origin())
            event.origins[0].comments.append(ksta)
            event.origins[0].time = copy_of_stream[0].stats.starttime +\
                detect_lags[0] + detection.detect_time
            event.origins[0].latitude = node[0]
            event.origins[0].longitude = node[1]
            event.origins[0].depth = node[2]
            for i, detect_lag in enumerate(detect_lags):
                station = stations[i]
                st = copy_of_stream.select(station=station)
                if len(st) != 0:
                    for tr in st:
                        _waveform_id = WaveformStreamID(
                            station_code=tr.stats.station,
                            channel_code=tr.stats.channel,
                            network_code='NA')
                        event.picks.append(
                            Pick(waveform_id=_waveform_id,
                                 time=tr.stats.starttime + detect_lag +
                                 detection.detect_time + pre_pick,
                                 onset='emergent',
                                 evaluation_mode='automatic'))
            print('Generating template for detection: ' + str(j))
            template = (_template_gen(event.picks, copy_of_stream,
                                      template_length, 'all'))
            template_name = template_saveloc + '/' +\
                str(template[0].stats.starttime) + '.ms'
            # In the interests of RAM conservation we write then read
            # Check coherency here!
            temp_coher, kchan = coherence(template, coherence_stations,
                                          coherence_clip)
            coh_thresh = float(coherence_thresh[0]) - kchan / \
                float(coherence_thresh[1])
            if temp_coher > coh_thresh:
                template.write(template_name, format="MSEED")
                print('Written template as: ' + template_name)
                print('---------------------------------coherence LEVEL: ' +
                      str(temp_coher))
                coherent = True
            else:
                print('Template was incoherent, coherence level: ' +
                      str(temp_coher))
                coherent = False
            del copy_of_stream, tr, template
            if coherent:
                templates.append(obsread(template_name))
                nodesout += [node]
                good_detections.append(detection)
            else:
                print('No template for you')
    if plotvar:
        all_detections = [(cum_net_trace[-1].stats.starttime +
                           detection.detect_time).datetime
                          for detection in detections]
        good_detections = [(cum_net_trace[-1].stats.starttime +
                            detection.detect_time).datetime
                           for detection in good_detections]
        if not plotsave:
            plotting.NR_plot(cum_net_trace[0:-1],
                             obspy.Stream(cum_net_trace[-1]),
                             detections=good_detections,
                             size=(18.5, 10),
                             title='Network response')
            # cum_net_trace.plot(size=(800,600), equal_scale=False)
        else:
            savefile = 'plots/' +\
                cum_net_trace[0].stats.starttime.datetime.strftime('%Y%m%d') +\
                '_NR_timeseries.pdf'
            plotting.NR_plot(cum_net_trace[0:-1],
                             obspy.Stream(cum_net_trace[-1]),
                             detections=good_detections,
                             size=(18.5, 10),
                             save=savefile,
                             title='Network response')
    nodesout = list(set(nodesout))
    return templates, nodesout
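The coherence gate in the template loop above implements the a - kchan/b form described in the docstring; a small standalone sketch with made-up numbers:

def passes_coherence_gate(temp_coher, kchan, coherence_thresh):
    # Keep a template only if its coherence beats a - kchan / b,
    # where kchan is the number of channels in the coherence estimate
    a, b = coherence_thresh
    return temp_coher > float(a) - kchan / float(b)

# Illustrative numbers only: gate = 0.5 - 20 / 200 = 0.4
print(passes_coherence_gate(0.45, 20, (0.5, 200.0)))  # True
print(passes_coherence_gate(0.35, 20, (0.5, 200.0)))  # False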
Example #32
def from_sfile(sfile,
               lowcut,
               highcut,
               samp_rate,
               filt_order,
               length,
               swin,
               debug=0):
    r"""Function to read in picks from sfile then generate the template from the
    picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the path to a seisan Nordic type \
        s-file containing waveform and pick information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template\
            defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template\
            defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in\
            template defaults file
    :type filt_order: int
    :param filt_order: Filter order (number of corners), if set to None \
            will look in template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template\
            defaults file.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')

    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import Sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = Sfile_util.readwavename(sfile)
    # Waveform files live in the WAV directory that parallels the REA
    # directory; rebuild the path accordingly (reassigning the loop
    # variable, as the original did, does not alter the list)
    pathparts = sfile.split('/')[0:-1]
    pathparts = ['WAV' if part == 'REA' else part for part in pathparts]
    wavpath = '/'.join(pathparts) + '/'
    # Read in waveform file
    for wavefile in wavefiles:
        print('I am going to read waveform data from: ' + wavpath + wavefile)
        if 'st' not in locals():
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked '
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    picks = Sfile_util.readpicks(sfile)
    print "I have found the following picks"
    for pick in picks:
        print ' '.join(
            [pick.station, pick.channel, pick.phase,
             str(pick.time)])

    # Process waveform data
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order, samp_rate,
                                  debug)
    st1 = _template_gen(picks, st, length, swin)
    return st1
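A usage sketch for the function above; the S-file path and the processing parameters are placeholders, and it assumes _template_gen returns an obspy Stream, as the snippet suggests:

# Hypothetical call - the S-file path below does not refer to real data
template = from_sfile('REA/TEST_/2013-09/01-0410-35L.S201309',
                      lowcut=2.0, highcut=10.0, samp_rate=50.0,
                      filt_order=4, length=6.0, swin='all', debug=0)
template.write('template.ms', format='MSEED')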