Example no. 1
#!/usr/bin/env python
from obspy.core import UTCDateTime
from obspy.clients.fdsn import Client
from obspy.signal.spectral_estimation import get_nlnm, get_nhnm
from scipy.optimize import fmin
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
stime = UTCDateTime('2019-271T00:00:00')
etime = stime + 1 * 24 * 60 * 60

# This code generates Figure 2 of the Ringler et al. magnetic paper.

import matplotlib as mpl
mpl.rc('font', family='serif')
mpl.rc('font', serif='Times')
#mpl.rc('text', usetex=True)
mpl.rc('font', size=16)

chans = ['LH1', 'LH2', 'LHZ']
locs = ['10', '00']
fig = plt.figure(2, figsize=(16, 16))
for idx, loc in enumerate(locs):
    plt.subplot(2, 2, 2 * idx + 1)
    for chan in chans:

        #net, sta, loc, chan = 'TA', 'H22K', '*', 'LHZ'
        net, sta = 'IU', 'COLA'

        pmax = 1. / 2000.
        pmin = 1. / 10.
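# Side note: '2019-271T00:00:00' uses the year + day-of-year form accepted by
# UTCDateTime; day 271 of 2019 is 28 September, so stime/etime above span
# 2019-09-28T00:00:00 to 2019-09-29T00:00:00 (exactly one day).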
Example no. 2
 def test_coincidenceTrigger(self):
     """
     Test network coincidence trigger.
     """
     st = Stream()
     files = [
         "BW.UH1._.SHZ.D.2010.147.cut.slist.gz",
         "BW.UH2._.SHZ.D.2010.147.cut.slist.gz",
         "BW.UH3._.SHZ.D.2010.147.cut.slist.gz",
         "BW.UH4._.EHZ.D.2010.147.cut.slist.gz"
     ]
     for filename in files:
         filename = os.path.join(self.path, filename)
         st += read(filename)
     # some prefiltering used for UH network
     st.filter('bandpass', freqmin=10, freqmax=20)
     # 1. no weighting, no stations specified, good settings
     # => 3 events, no false triggers
     # for the first test we make some additional tests regarding types
     res = coincidenceTrigger("recstalta",
                              3.5,
                              1,
                              st.copy(),
                              3,
                              sta=0.5,
                              lta=10)
     self.assertTrue(isinstance(res, list))
     self.assertTrue(len(res) == 3)
     expected_keys = [
         'time', 'coincidence_sum', 'duration', 'stations', 'trace_ids'
     ]
     expected_types = [UTCDateTime, float, float, list, list]
     for item in res:
         self.assertTrue(isinstance(item, dict))
         for key, _type in zip(expected_keys, expected_types):
             self.assertTrue(key in item)
             self.assertTrue(isinstance(item[key], _type))
     self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
     self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
     self.assertTrue(4.2 < res[0]['duration'] < 4.8)
     self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[0]['coincidence_sum'] == 4)
     self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
     self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
     self.assertTrue(3.2 < res[1]['duration'] < 3.7)
     self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
     self.assertTrue(res[1]['coincidence_sum'] == 3)
     self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
     self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
     self.assertTrue(4.2 < res[2]['duration'] < 4.4)
     self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[2]['coincidence_sum'] == 4)
     # 2. no weighting, station selection
     # => 2 events, no false triggers
     trace_ids = ['BW.UH1..SHZ', 'BW.UH3..SHZ', 'BW.UH4..EHZ']
     # ignore UserWarnings
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('ignore', UserWarning)
         re = coincidenceTrigger("recstalta",
                                 3.5,
                                 1,
                                 st.copy(),
                                 3,
                                 trace_ids=trace_ids,
                                 sta=0.5,
                                 lta=10)
         self.assertTrue(len(re) == 2)
         self.assertTrue(re[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
         self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
         self.assertTrue(4.2 < re[0]['duration'] < 4.8)
         self.assertTrue(re[0]['stations'] == ['UH3', 'UH1', 'UH4'])
         self.assertTrue(re[0]['coincidence_sum'] == 3)
         self.assertTrue(re[1]['time'] > UTCDateTime("2010-05-27T16:27:27"))
         self.assertTrue(re[1]['time'] < UTCDateTime("2010-05-27T16:27:33"))
         self.assertTrue(4.2 < re[1]['duration'] < 4.4)
         self.assertTrue(re[1]['stations'] == ['UH3', 'UH1', 'UH4'])
         self.assertTrue(re[1]['coincidence_sum'] == 3)
     # 3. weighting, station selection
     # => 3 events, no false triggers
     trace_ids = {
         'BW.UH1..SHZ': 0.4,
         'BW.UH2..SHZ': 0.35,
         'BW.UH3..SHZ': 0.4,
         'BW.UH4..EHZ': 0.25
     }
     res = coincidenceTrigger("recstalta",
                              3.5,
                              1,
                              st.copy(),
                              1.0,
                              trace_ids=trace_ids,
                              sta=0.5,
                              lta=10)
     self.assertTrue(len(res) == 3)
     self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
     self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
     self.assertTrue(4.2 < res[0]['duration'] < 4.8)
     self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[0]['coincidence_sum'] == 1.4)
     self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
     self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
     self.assertTrue(3.2 < res[1]['duration'] < 3.7)
     self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
     self.assertTrue(res[1]['coincidence_sum'] == 1.15)
     self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
     self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
     self.assertTrue(4.2 < res[2]['duration'] < 4.4)
     self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
     self.assertTrue(res[2]['coincidence_sum'] == 1.4)
     # 4. weighting, station selection, max_len
     # => 2 events, no false triggers, small event does not overlap anymore
     trace_ids = {'BW.UH1..SHZ': 0.6, 'BW.UH2..SHZ': 0.6}
     # ignore UserWarnings
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('ignore', UserWarning)
         re = coincidenceTrigger("recstalta",
                                 3.5,
                                 1,
                                 st.copy(),
                                 1.2,
                                 trace_ids=trace_ids,
                                 max_trigger_length=0.13,
                                 sta=0.5,
                                 lta=10)
         self.assertTrue(len(re) == 2)
         self.assertTrue(re[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
         self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
         self.assertTrue(0.2 < re[0]['duration'] < 0.3)
         self.assertTrue(re[0]['stations'] == ['UH2', 'UH1'])
         self.assertTrue(re[0]['coincidence_sum'] == 1.2)
         self.assertTrue(re[1]['time'] > UTCDateTime("2010-05-27T16:27:27"))
         self.assertTrue(re[1]['time'] < UTCDateTime("2010-05-27T16:27:33"))
         self.assertTrue(0.18 < re[1]['duration'] < 0.2)
         self.assertTrue(re[1]['stations'] == ['UH2', 'UH1'])
         self.assertTrue(re[1]['coincidence_sum'] == 1.2)
     # 5. station selection, extremely sensitive settings
     # => 4 events, 1 false triggers
     res = coincidenceTrigger("recstalta",
                              2.5,
                              1,
                              st.copy(),
                              2,
                              trace_ids=['BW.UH1..SHZ', 'BW.UH3..SHZ'],
                              sta=0.3,
                              lta=5)
     self.assertTrue(len(res) == 5)
     self.assertTrue(res[3]['time'] > UTCDateTime("2010-05-27T16:27:01"))
     self.assertTrue(res[3]['time'] < UTCDateTime("2010-05-27T16:27:02"))
     self.assertTrue(1.5 < res[3]['duration'] < 1.7)
     self.assertTrue(res[3]['stations'] == ['UH3', 'UH1'])
     self.assertTrue(res[3]['coincidence_sum'] == 2.0)
     # 6. same as 5, gappy stream
     # => same as 5 (almost, duration of 1 event changes by 0.02s)
     st2 = st.copy()
     tr1 = st2.pop(0)
     t1 = tr1.stats.starttime
     t2 = tr1.stats.endtime
     td = t2 - t1
     tr1a = tr1.slice(starttime=t1, endtime=t1 + 0.45 * td)
     tr1b = tr1.slice(starttime=t1 + 0.6 * td, endtime=t1 + 0.94 * td)
     st2.insert(1, tr1a)
     st2.insert(3, tr1b)
     res = coincidenceTrigger("recstalta",
                              2.5,
                              1,
                              st2,
                              2,
                              trace_ids=['BW.UH1..SHZ', 'BW.UH3..SHZ'],
                              sta=0.3,
                              lta=5)
     self.assertTrue(len(res) == 5)
     self.assertTrue(res[3]['time'] > UTCDateTime("2010-05-27T16:27:01"))
     self.assertTrue(res[3]['time'] < UTCDateTime("2010-05-27T16:27:02"))
     self.assertTrue(1.5 < res[3]['duration'] < 1.7)
     self.assertTrue(res[3]['stations'] == ['UH3', 'UH1'])
     self.assertTrue(res[3]['coincidence_sum'] == 2.0)
     # 7. same as 3 but modify input trace ids and check output of trace_ids
     # and other additional information with ``details=True``
     st2 = st.copy()
     st2[0].stats.network = "XX"
     st2[1].stats.location = "99"
     st2[1].stats.network = ""
     st2[1].stats.location = "99"
     st2[1].stats.channel = ""
     st2[2].stats.channel = "EHN"
     st2[3].stats.network = ""
     st2[3].stats.channel = ""
     st2[3].stats.station = ""
     trace_ids = {
         'XX.UH1..SHZ': 0.4,
         '.UH2.99.': 0.35,
         'BW.UH3..EHN': 0.4,
         '...': 0.25
     }
     res = coincidenceTrigger("recstalta",
                              3.5,
                              1,
                              st2,
                              1.0,
                              trace_ids=trace_ids,
                              details=True,
                              sta=0.5,
                              lta=10)
     self.assertTrue(len(res) == 3)
     self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
     self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
     self.assertTrue(4.2 < res[0]['duration'] < 4.8)
     self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', ''])
     self.assertTrue(res[0]['trace_ids'][0] == st2[2].id)
     self.assertTrue(res[0]['trace_ids'][1] == st2[1].id)
     self.assertTrue(res[0]['trace_ids'][2] == st2[0].id)
     self.assertTrue(res[0]['trace_ids'][3] == st2[3].id)
     self.assertTrue(res[0]['coincidence_sum'] == 1.4)
     self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
     self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
     self.assertTrue(3.2 < res[1]['duration'] < 3.7)
     self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
     self.assertTrue(res[1]['trace_ids'][0] == st2[1].id)
     self.assertTrue(res[1]['trace_ids'][1] == st2[2].id)
     self.assertTrue(res[1]['trace_ids'][2] == st2[0].id)
     self.assertTrue(res[1]['coincidence_sum'] == 1.15)
     self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
     self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
     self.assertTrue(4.2 < res[2]['duration'] < 4.4)
     self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', ''])
     self.assertTrue(res[2]['trace_ids'][0] == st2[2].id)
     self.assertTrue(res[2]['trace_ids'][1] == st2[1].id)
     self.assertTrue(res[2]['trace_ids'][2] == st2[0].id)
     self.assertTrue(res[2]['trace_ids'][3] == st2[3].id)
     self.assertTrue(res[2]['coincidence_sum'] == 1.4)
     expected_keys = [
         'cft_peak_wmean', 'cft_std_wmean', 'cft_peaks', 'cft_stds'
     ]
     expected_types = [float, float, list, list]
     for item in res:
         for key, _type in zip(expected_keys, expected_types):
             self.assertTrue(key in item)
             self.assertTrue(isinstance(item[key], _type))
     # check some of the detailed info
     ev = res[-1]
      self.assertAlmostEqual(ev['cft_peak_wmean'], 18.097582068353855)
      self.assertAlmostEqual(ev['cft_std_wmean'], 4.7972436395074087)
      self.assertAlmostEqual(ev['cft_peaks'][0], 18.973097608513633)
      self.assertAlmostEqual(ev['cft_peaks'][1], 16.852175794415011)
      self.assertAlmostEqual(ev['cft_peaks'][2], 18.64005853900883)
      self.assertAlmostEqual(ev['cft_peaks'][3], 17.572363634564621)
      self.assertAlmostEqual(ev['cft_stds'][0], 4.8811165222946951)
      self.assertAlmostEqual(ev['cft_stds'][1], 4.4446373508521804)
      self.assertAlmostEqual(ev['cft_stds'][2], 5.3499401252675964)
      self.assertAlmostEqual(ev['cft_stds'][3], 4.2723814539487703)
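
# A minimal, hedged sketch of the same network coincidence trigger outside the
# test harness; recent ObsPy releases expose the function in snake_case as
# obspy.signal.trigger.coincidence_trigger, and the file names are the test
# files used above.
from obspy import read
from obspy.signal.trigger import coincidence_trigger

st = read("BW.UH1._.SHZ.D.2010.147.cut.slist.gz")  # add the UH2/UH3/UH4 files too
st.filter('bandpass', freqmin=10, freqmax=20)      # same prefiltering as the test
triggers = coincidence_trigger("recstalta", 3.5, 1, st, 3, sta=0.5, lta=10)
for trig in triggers:                              # each trigger is a dict
    print(trig['time'], trig['coincidence_sum'], trig['stations'])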
Example no. 3
import datetime
import importlib

from obspy import UTCDateTime

import asknow
importlib.reload(asknow)  # reload via importlib (the imp module is deprecated)
from asknow import asknow_humidity_fc28, asknow_photoresistor

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

host = "192.168.0.12"
user = "******"
password = "******"
database = "test"

modname = "Indoor"  # Indoor, Exterior

timewindow = 60. * 60 * 24
t1 = UTCDateTime(datetime.datetime.now()) - timewindow
t2 = UTCDateTime(datetime.datetime.now())

timeh, humidity, humidity_ohm = asknow_humidity_fc28(host,
                                                     user,
                                                     password,
                                                     database,
                                                     t1,
                                                     t2,
                                                     modname,
                                                     verbose=True,
                                                     doplot=True)
timel, lux, lux_ohm = asknow_photoresistor(host,
                                           user,
                                           password,
                                           database,
Example no. 4
paz = {
    'zeros': [0.j, 0.j, -392. + 0.j, -1960. + 0.j,
              -1490. + 1740.j, -1490. - 1740.j],
    'poles': [-0.03691 + 0.03702j, -0.03691 - 0.03702j, -343. + 0.j,
              -370. + 467.j, -370. - 467.j, -836. + 1522.j, -836. - 1522.j,
              -4900. + 4700.j, -4900. - 4700.j, -6900. + 0.j, -15000. + 0.j],
    'gain': 4.344928 * 10**17,
    'sensitivity': 754.3 * 2.**26 / 40.
}
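
# Hedged usage note: a poles-and-zeros dictionary in this form (keys 'poles',
# 'zeros', 'gain', 'sensitivity') is the format ObsPy's simulate routines
# expect, e.g. (the stream `st` is an assumption, not part of this snippet):
# st.simulate(paz_remove=paz)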

stimes = [UTCDateTime(t) for t in [
    '2016-196T01:00:00', '2016-197T05:30:00', '2016-198T02:00:00',
    '2016-201T22:00:00', '2016-203T04:00:00', '2016-206T01:00:00',
    '2016-208T03:40:00', '2016-213T11:50:00', '2016-215T02:00:00',
    '2016-216T05:00:00', '2016-217T07:00:00', '2016-218T07:00:00',
    '2016-220T04:00:00', '2016-222T06:00:00', '2016-223T05:00:00']]
Example no. 5
serviceParam = rsam.readConfigFile(configurationFile)
if serviceParam == -1:
    print("Error reading configuration file: %s" % configurationFile)
    exit(-1)

conSrv = rsam.chooseService(serviceParam[server])
if conSrv == -1:
    print("Error connecting service: %s" % server)
    exit(-1)

stations = rsam.readConfigFile(stationsFile)
if stations == -1:
    print("Error reading stations file: %s" % stationsFile)
    exit(-1)

conDB = DBConexion.createConexionDB(prmDB[DB]['host'], prmDB[DB]['port'],
                                    prmDB[DB]['user'], prmDB[DB]['pass'],
                                    prmDB[DB]['DBName'])
if conDB == -1:
    print("Error in createConexionDB %s" % (prmDB[DB]))
    exit(-1)

#diaString=datetime.now().strftime('%Y-%m-%d %H:%M:00')
#diaUTC=UTCDateTime(diaString) - 60
#diaUTC=UTCDateTime('2016-12-16 19:08:00')
#diaUTC=UTCDateTime(UTCDateTime('2016-12-28 10:29:00'))

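# Truncate "now" to the start of the current minute and step back 120 s,
# presumably so the RSAM update below always operates on a fully elapsed minute.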
diaUTC = UTCDateTime(UTCDateTime.now().strftime("%Y-%m-%d %H:%M:00")) - 120

rsam.rsamUpdate(stations, diaUTC, server, conSrv, conDB)


def plotXcorrEvent(st, stn, stack, maxlag, acausal=False, figurename=None):

    eventtime = UTCDateTime(1998, 7, 15, 4, 53, 21, 0)  # event near MLAC

    # station locations
    latP, lonP = 35.41, -120.55  # station PHL
    latM, lonM = 37.63, -118.84  # station MLAC
    latE, lonE = 37.55, -118.809  # event 1998

    # calculate distance between stations
    dist = gps2DistAzimuth(latP, lonP, latM, lonM)[0]  # between PHL and MLAC
    distE = gps2DistAzimuth(latP, lonP, latE, lonE)[0]  # between event and PHL
    #
    # CROSSCORRELATION
    # reverse stack to plot acausal part (= negative times of correlation)
    if acausal:
        stack = stack[::-1]

    # find center of stack
    c = int(np.ceil(len(stack) / 2.) + 1)

    #cut stack to maxlag
    stack = stack[c - maxlag * int(np.ceil(stn[0].stats.sampling_rate)):c +
                  maxlag * int(np.ceil(stn[0].stats.sampling_rate))]

    # find new center of stack
    c2 = int(np.ceil(len(stack) / 2.) + 1)

    # define time vector for cross correlation
    limit = (len(stack) / 2.) * stn[0].stats.delta
    timevec = np.arange(-limit, limit, stn[0].stats.delta)
    # define timevector: dist / t
    timevecDist = dist / timevec

    # EVENT
    ste = st.copy()
    st_PHL_e = ste.select(station='PHL')

    # cut down event trace to 'maxlag' seconds
    dt = len(stack[c2:]) / stn[0].stats.sampling_rate  #xcorrlength
    st_PHL_e[0].trim(eventtime, eventtime + dt)

    # create time vector for event signal
    # extreme values:
    limit = st_PHL_e[0].stats.npts * st_PHL_e[0].stats.delta
    timevecSig = np.arange(0, limit, st_PHL_e[0].stats.delta)

    # PLOTTING
    fig = plt.figure(figsize=(12.0, 6.0))
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2)

    # plot noise correlation
    ax1.plot(timevecDist[c2:], stack[c2:], 'k')
    ax1.set_title('Noise correlation between MLAC and PHL')

    # plot event near MLAC measured at PHL
    ax2.plot(distE / timevecSig,
             st_PHL_e[0].data / np.max(np.abs(st_PHL_e[0].data)), 'r')
    ax2.set_title('Event near MLAC observed at PHL')

    ax2.set_xlim((0, 8000))
    ax1.set_xlim((0, 8000))

    ax2.set_xlabel("group velocity [m/s]")

    if figurename is not None:
        fig.savefig(figurename, format="pdf")
    else:
        plt.show()
Example no. 7
    def downloadWaveforms(self):
        # first check to make sure the boxes are filled...addWidget
        # TODO!!!

        # get the inputs...inputs
        service = self.cb.currentText()
        if (service == 'choose...'):
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)
            msg.setText('Please select a service to search')
            msg.setWindowTitle('oops...')
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()
            return

        # Clear old streams because we don't need them anymore
        self.clearWaveforms()

        network = self.networkNameBox.text().upper().replace(' ', '')
        self.networkNameBox.setText(network)
        station = self.stationNameBox.text().upper().replace(' ', '')
        self.stationNameBox.setText(station)
        location = self.location_Box.text().upper().replace(' ', '')
        self.location_Box.setText(location)
        channel = self.channel_Box.text().upper().replace(' ', '')
        self.channel_Box.setText(channel)
        date = self.startDate_edit.date().toPyDate()
        time = self.startTime_edit.time().toPyTime()
        traceLength = self.traceLength_t.value()
        utcString = str(date) + 'T' + str(time)
        startTime = UTCDateTime(utcString)
        endTime = startTime + traceLength

        # Check for unfilled boxes
        if (network == '' or station == '' or channel == ''):
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)
            msg.setText('You are missing some important info...')
            msg.setWindowTitle('oops...')
            msg.setDetailedText(
                "Network, Station, Location, and Channel are all required data."
            )
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()
            return

        # Download the waveforms
        # self.parent.setStatus('connecting to '+service)
        client = Client(service)

        # self.parent.setStatus('downloading Waveforms...')
        try:
            self.stream = client.get_waveforms(network, station, location,
                                               channel, startTime, endTime)

        except Exception:
            # self.parent.setStatus('')
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)
            msg.setText('Failure loading waveform')
            msg.setWindowTitle('oops...')
            msg.setDetailedText(
                "Double check that the values you entered are valid and the time and date are appropriate."
            )
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()
            return

        for trace in self.stream:
            trace.data = trace.data - np.mean(trace.data)
        self.stream.merge(fill_value=0)

        # self.parent.setStatus('downloading Inventory...')
        # self.parent.consoleBox.append( 'Downloaded waveforms from '+service )

        # Now get the corresponding stations
        try:
            self.inventory = client.get_stations(network=network,
                                                 station=station)
        except Exception:
            # self.parent.setStatus('')
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)
            msg.setText('Failure loading Inventory')
            msg.setWindowTitle('oops...')
            msg.setDetailedText(
                "Double check that the values you entered are valid and the time and date are appropriate."
            )
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()

            return
Example no. 8
max_slip = 60  #Maximum slip (m) allowed in the model

# Correlation function parameters
hurst = 0.4
Ldip = 'MH2019'  # Correlation length scaling, 'auto' uses  Mai & Beroza 2002,
Lstrike = 'MH2019'  # MH2019 uses Melgar & Hayes 2019
lognormal = True
slip_standard_deviation = 0.9
num_modes = 500
rake = 90.0

# Rupture parameters
force_magnitude = True  #Make the magnitudes EXACTLY the value in target_Mw
force_area = False  #Forces using the entire fault area defined by the .fault file as opposed to the scaling laws
no_random = False  #If True uses median length/width, if False draws from prob. distribution
time_epi = UTCDateTime('2016-09-07T14:42:26')  #Defines the hypocentral time
hypocenter = [
    0.8301, 0.01, 27.67
]  #Defines the specific hypocenter location if force_hypocenter=True
force_hypocenter = False  # Forces hypocenter to occur at specified location as opposed to random
mean_slip = None  #Provide path to file name of .rupt to be used as mean slip pattern
center_subfault = None  #Integer value, if != None use that subfault as center for defining rupt area. If None then selected at random
use_hypo_fraction = False  #If True use hypocenter PDF positions from Melgar & Hayes 2019, if False then selects at random

# Kinematic parameters
source_time_function = 'dreger'  # options are 'triangle' or 'cosine' or 'dreger'
rise_time_depths = [10, 15]  #Transition depths for rise time scaling
buffer_factor = 0.5  # I don't think this does anything anymore, remove?
shear_wave_fraction = 0.8  #Fraction of shear wave speed to use as mean rupture velocity

#Station information (only used when synthesizing waveforms)
Example no. 9
        line = lines[-i].strip()
        if len(line) > 0:
            line_count = len(lines) - i + 1
            break
    if verbose:
        msg_lib.info(f'INPUT: {line_count} lines')

    #
    # get the time of each line and the power, skip headers
    #
    powerTime = list()
    for i in range(2, line_count):
        line = lines[i]
        line = line.strip()
        values = line.split()
        this_time = UTCDateTime(values[0])
        if start_date_time <= this_time <= end_date_time:
            date, time = values[0].split('T')
            dateValues = date.split('-')
            timeValues = time.split(':')
            X.append(
                datetime.datetime(int(dateValues[0]), int(dateValues[1]),
                                  int(dateValues[2]), int(timeValues[0]),
                                  int(timeValues[1])))
            Y.append(float(values[binIndex]) * factor)

msg_lib.info(f'Maximum Y: {max(Y)}')
if len(X) <= 1:
    code = msg_lib.error("No data found", 2)
    sys.exit(code)
Example no. 10
for index_event in range(len(catalog_merged_double)):
    if int(catalog_merged_double.double_block[index_event]) in multiple_double_list:
        catalog_merged_double.at[index_event,'manual'] = True
        #print(index_event, catalog_merged_double.double_block[index_event],catalog_merged_double.manual[index_event])

# Plot the events for manual inspection
for multiple_index in tqdm(range(len(multiple_double_list))):#tqdm(range(int(len(catalog_merged_double)/2))):
    # Get the index of the first event of a certain block with multiple (>2) doubles
    event_index=catalog_merged_double[catalog_merged_double['double_block']==multiple_double_list[multiple_index]].index.values[0]
    
    # Get the one or 2 files containing the waveforms around the time of interest
    segy_files=time2file_name(input_time=catalog_merged_double.datetime[event_index],sec_before=3,sec_after=3,catalog_time_n_files=catalog_time_n_files)
    #print(segy_files,"\n")
    
    # Use the function to load and slice around the time of interest
    ms_data=load_and_slice(files2load=segy_files,folder=folder_segy_files,datetime2load=UTCDateTime(catalog_merged_double.datetime[event_index]),sec_before=5,sec_after=5)
    #print('Start and endtime',ms_data[0].stats.starttime,ms_data[0].stats.endtime,"\n")

    # Plot the waveforms, times for ES and MFA and energy stack
    plot_3cV2(ms_data=ms_data,block_index=multiple_double_list[multiple_index],output_folder=folder_plots) 

#%% Part 4 - Treatement of double events with two entries

# The catalog
catalog_merged_double_two_events=catalog_merged_double[catalog_merged_double['manual']==False]
catalog_merged_double_two_events.reset_index(drop=True,inplace=True)

# At this point, I know that this catalog is composed of couples. Each couple has an 'ES' and an 'MFA' entry for the same event.
# The data treatment is: 1) preserve the info from MFA, 2) rename the source to 'Both',
# 3) drop the (from now on) unused columns, and 4) merge it back into the single catalog
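
# A hedged sketch of the four steps described above; the column name 'source'
# and the single-event catalog `catalog_single` are assumptions, only
# 'double_block' and 'manual' appear in the snippet itself.
# kept = catalog_merged_double_two_events[
#     catalog_merged_double_two_events['source'] == 'MFA'].copy()    # 1) keep the MFA info
# kept['source'] = 'Both'                                            # 2) relabel the source
# kept = kept.drop(columns=['double_block', 'manual'])               # 3) drop unused columns
# catalog_single = pd.concat([catalog_single, kept],
#                            ignore_index=True)                      # 4) merge back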
Example no. 11
        fkresultsT = 'FK_RESULTS'

    if args.pfk_id:
        if args.pfk_id == '*':
            print('Extract all pfk_id')
            pfk_id = None
        else:
            pfk_id = args.pfk_id
    else:
        pfk_id = None

    if args.outputtable:
        outputtable = args.outputtable

    if args.tS:
        t_S = UTCDateTime(args.tS)
        print('setting ini time:', t_S)
    else:
        t_S = None

    if args.tE:
        t_E = UTCDateTime(args.tE)
        print('setting end time:', t_E)
    else:
        t_E = None

    if args.wf:
        if args.wf == 1:
            wf = 1
        else:
            wf = 0
Example no. 12
    code = msg_lib.error(f'bad parameter file name [{param_file}]', 2)
    sys.exit(code)

network = utils_lib.get_param(args, 'net', None, usage)
station = utils_lib.get_param(args, 'sta', None, usage)
location = sta_lib.get_location(utils_lib.get_param(args, 'loc', None, usage))
channel = utils_lib.get_param(args, 'chan', None, usage)
xtype = utils_lib.get_param(args, 'xtype', None, usage)

# Specific start and end date and times from user.
# We always want to start from the beginning of the day, so we discard user hours, if any.
start_date_time = utils_lib.get_param(args, 'start', None, usage)
start_date_time = start_date_time.split('T')[0]
start_year, start_month, start_day = start_date_time.split('-')
try:
    start_datetime = UTCDateTime(start_date_time)
except Exception as ex:
    usage()
    code = msg_lib.error(f'Invalid start ({start_date_time})\n{ex}', 2)
    sys.exit(code)

end_date_time = utils_lib.get_param(args, 'end', None, usage)
end_date_time = end_date_time.split('T')[0]
end_year, end_month, end_day = end_date_time.split('-')
try:
    end_datetime = UTCDateTime(end_date_time) + 86400
except Exception as ex:
    usage()
    code = msg_lib.error(f'Invalid end ({end_date_time})\n{ex}', 2)
    sys.exit(code)
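
# Worked example of the day handling above (values are illustrative): a user
# 'start' of '2020-03-05T14:20:00' is cut to '2020-03-05', i.e.
# UTCDateTime 2020-03-05T00:00:00, and an 'end' of '2020-03-06' becomes
# UTCDateTime('2020-03-06') + 86400 = 2020-03-07T00:00:00, so the final
# requested day is included in full.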
Example no. 13
    def test_sonic(self):
#        for i in xrange(100):
        np.random.seed(2348)

        geometry = np.array([[0.0, 0.0, 0.0],
                             [-5.0, 7.0, 0.0],
                             [5.0, 7.0, 0.0],
                             [10.0, 0.0, 0.0],
                             [5.0, -7.0, 0.0],
                             [-5.0, -7.0, 0.0],
                             [-10.0, 0.0, 0.0]])

        geometry /= 100      # in km
        slowness = 1.3       # in s/km
        baz_degree = 20.0    # 0.0 > source in x direction
        baz = baz_degree * np.pi / 180.
        df = 100             # samplerate
        # SNR = 100.         # signal to noise ratio
        amp = .00001         # amplitude of coherent wave
        length = 500         # signal length in samples

        coherent_wave = amp * np.random.randn(length)

        # time offsets in samples
        dt = df * slowness * (np.cos(baz) * geometry[:, 1] + np.sin(baz) *
                              geometry[:, 0])
        dt = np.round(dt)
        dt = dt.astype('int32')
        max_dt = np.max(dt) + 1
        min_dt = np.min(dt) - 1
        trl = list()
        for i in range(len(geometry)):
            tr = Trace(coherent_wave[-min_dt + dt[i]:-max_dt + dt[i]].copy())
                # + amp / SNR * \
                # np.random.randn(length - abs(min_dt) - abs(max_dt)))
            tr.stats.sampling_rate = df
            tr.stats.coordinates = AttribDict()
            tr.stats.coordinates.x = geometry[i, 0]
            tr.stats.coordinates.y = geometry[i, 1]
            tr.stats.coordinates.elevation = geometry[i, 2]
            # lowpass random signal to f_nyquist / 2
            tr.filter("lowpass", freq=df / 4.)
            trl.append(tr)

        st = Stream(trl)

        stime = UTCDateTime(1970, 1, 1, 0, 0)
        etime = UTCDateTime(1970, 1, 1, 0, 0) + \
                (length - abs(min_dt) - abs(max_dt)) / df

        win_len = 2.
        step_frac = 0.2
        sll_x = -3.0
        slm_x = 3.0
        sll_y = -3.0
        slm_y = 3.0
        sl_s = 0.1

        frqlow = 1.0
        frqhigh = 8.0
        prewhiten = 0

        semb_thres = -1e99
        vel_thres = -1e99

        # out returns: rel. power, abs. power, backazimuth, slowness
        out = sonic(st, win_len, step_frac, sll_x, slm_x, sll_y, slm_y, sl_s,
                    semb_thres, vel_thres, frqlow, frqhigh, stime, etime,
                    prewhiten, coordsys='xy', verbose=False)

        # returns baz
        np.testing.assert_almost_equal(out[:, 3].mean(), 18.434948822922024)
        # slowness ~= 1.3
        np.testing.assert_almost_equal(out[:, 4].mean(), 1.26491106407)
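
# Note: in later ObsPy releases this array-processing routine lives in
# obspy.signal.array_analysis (array_processing); `sonic` is the older name
# exercised by this test.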
Example no. 14
from obspy import read
from matplotlib import pyplot as plt
from obspy.core import UTCDateTime
from datetime import timedelta
from numpy import diff
from mudpy.forward import lowpass

time_epi = UTCDateTime('2015-04-25T06:11:26')

tr = 10
dvert = 0.5
u1 = read(u'/Users/dmelgar/Slip_inv/Nepal_ttests_' + str(tr) +
          '/output/forward_models/' + str(tr) + 's_vr2.8.KKN4.vel.u')
u2 = read(u'/Users/dmelgar/Slip_inv/Nepal_ttests_' + str(tr) +
          '/output/forward_models/' + str(tr) + 's_vr3.0.KKN4.vel.u')
u3 = read(u'/Users/dmelgar/Slip_inv/Nepal_ttests_' + str(tr) +
          '/output/forward_models/' + str(tr) + 's_vr3.2.KKN4.vel.u')
u4 = read(u'/Users/dmelgar/Slip_inv/Nepal_ttests_' + str(tr) +
          '/output/forward_models/' + str(tr) + 's_vr3.4.KKN4.vel.u')
u5 = read(u'/Users/dmelgar/Slip_inv/Nepal_ttests_' + str(tr) +
          '/output/forward_models/' + str(tr) + 's_vr3.6.KKN4.vel.u')
u = read(u'/Users/dmelgar/Nepal2015/GPS/PPP/KKN4.LXZ.sac')

#trim
delay = 15
t1 = time_epi + timedelta(seconds=delay)
t2 = t1 + timedelta(seconds=70)
u1[0].trim(starttime=t1, endtime=t2)
u2[0].trim(starttime=t1, endtime=t2)
u3[0].trim(starttime=t1, endtime=t2)
u4[0].trim(starttime=t1, endtime=t2)
Example no. 15
def do_plotting_setup_and_run(opdict, plot_wfm=True, plot_grid=True):
    """
    Plot the results of a wavloc run (migration and location). All options and
    parameters are taken from an opdict.

    :param opdict: WavlocOptions.opdict that contains the options / parameters.
    :param plot_wfm: If ``True`` plots waveforms after location (filtered data
        and kurtosis).
    :param plot_grid: If ``True`` plots the migration grid.

    :type plot_wfm: boolean
    :type plot_grid: boolean
    """

    # get / set info
    base_path = opdict['base_path']

    locfile = os.path.join(base_path, 'out', opdict['outdir'], 'loc',
                           'locations.dat')
    stackfile = os.path.join(base_path, 'out', opdict['outdir'], 'stack',
                             'combined_stack_all.hdf5')

    data_dir = os.path.join(base_path, 'data', opdict['datadir'])

    data_glob = opdict['dataglob']
    data_files = glob.glob(os.path.join(data_dir, data_glob))
    data_files.sort()

    kurt_glob = opdict['kurtglob']
    kurt_files = glob.glob(os.path.join(data_dir, kurt_glob))
    kurt_files.sort()
    mig_files = kurt_files

    if opdict['kderiv']:
        grad_glob = opdict['gradglob']
        grad_files = glob.glob(os.path.join(data_dir, grad_glob))
        grad_files.sort()
        mig_files = grad_files

        if opdict['gauss']:
            gauss_glob = opdict['gaussglob']
            gauss_files = glob.glob(os.path.join(data_dir, gauss_glob))
            gauss_files.sort()
            mig_files = gauss_files

    figdir = os.path.join(base_path, 'out', opdict['outdir'], 'fig')

    # grids
    search_grid_filename = os.path.join(base_path, 'lib',
                                        opdict['search_grid'])
    # read time grid information
    time_grids = get_interpolated_time_grids(opdict)

    # read locations
    locs = read_locs_from_file(locfile)

    # open stack file
    f_stack = h5py.File(stackfile, 'r')
    max_val = f_stack['max_val_smooth']
    stack_start_time = UTCDateTime(max_val.attrs['start_time'])

    for loc in locs:
        # generate the grids
        o_time = loc['o_time']
        start_time = o_time-opdict['plot_tbefore']
        end_time = o_time+opdict['plot_tafter']

        # re-read grid info to ensure clean copy
        grid_info = read_hdr_file(search_grid_filename)

        x = loc['x_mean']
        y = loc['y_mean']
        z = loc['z_mean']
        # get the corresponding travel-times for time-shifting
        ttimes = {}
        for sta in time_grids.keys():
            ttimes[sta] = time_grids[sta].value_at_point(x, y, z)

        tshift_migration = max(ttimes.values())

        start_time_migration = start_time-tshift_migration
        end_time_migration = end_time+tshift_migration

        if plot_grid:
            logging.info('Plotting grid for location %s' % o_time.isoformat())

            # read data
            mig_dict, delta = \
                read_data_compatible_with_time_dict(mig_files, time_grids,
                                                    start_time_migration,
                                                    end_time_migration)
            # do migration
            do_migration_loop_continuous(opdict, mig_dict, delta,
                                         start_time_migration, grid_info,
                                         time_grids, keep_grid=True)
            # plot
            plotLocationGrid(loc, grid_info, figdir,
                             opdict['plot_otime_window'])

        if plot_wfm:
            logging.info('Plotting waveforms for location %s' %
                         o_time.isoformat())

            # read data
            data_dict, delta = \
                read_data_compatible_with_time_dict(data_files, time_grids,
                                                    start_time_migration,
                                                    end_time_migration)
            mig_dict, delta = \
                read_data_compatible_with_time_dict(mig_files, time_grids,
                                                    start_time_migration,
                                                    end_time_migration)
            # cut desired portion out of data
            for sta in data_dict.keys():
                tmp = data_dict[sta]

                # alignment on origin time
                istart = int(np.round((start_time + ttimes[sta] -
                                       start_time_migration) / delta))
                iend = istart + int(np.round((opdict['plot_tbefore'] +
                                              opdict['plot_tafter']) /
                                             delta))

                # sanity check in case event is close to start or end of data
                if istart < 0:
                    istart = 0
                if iend > len(tmp):
                    iend = len(tmp)
                data_dict[sta] = tmp[istart:iend]
                # do slice
                tmp = mig_dict[sta]
                mig_dict[sta] = tmp[istart:iend]

            # retrieve relevant portion of stack max
            istart = int(np.round((o_time - opdict['plot_tbefore'] -
                                   stack_start_time) / delta))
            iend = istart + int(np.round((opdict['plot_tbefore'] +
                                          opdict['plot_tafter']) / delta))
            # sanity check in case event is close to start or end of data
            if istart < 0:
                start_time = start_time + np.abs(istart)*delta
                istart = 0
            if iend > len(max_val):
                iend = len(max_val)
            # do slice
            stack_wfm = max_val[istart:iend]

            # plot
            plotLocationWaveforms(loc, start_time, delta, data_dict, mig_dict,
                                  stack_wfm, figdir)

    f_stack.close()
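
# A hedged sketch of how this routine might be driven; the opdict keys mirror
# the ones read inside the function, but every value below is an assumption.
# opdict = {
#     'base_path': '/path/to/run', 'outdir': 'out1', 'datadir': 'raw',
#     'dataglob': '*.filt.*', 'kurtglob': '*.kurt.*', 'kderiv': False,
#     'search_grid': 'grid.search.hdr', 'plot_tbefore': 5.0,
#     'plot_tafter': 20.0, 'plot_otime_window': 2.0,
# }
# do_plotting_setup_and_run(opdict, plot_wfm=True, plot_grid=False)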
Example no. 16
    def FKCoherence(self, st, inv, DT, linf, lsup, slim, win_len, sinc,
                    method):
        def find_nearest(array, value):

            idx, val = min(enumerate(array), key=lambda x: abs(x[1] - value))
            return idx, val

        sides = 'onesided'
        pi = math.pi

        smax = slim
        smin = -1 * smax
        Sx = np.arange(smin, smax, sinc)[np.newaxis]
        Sy = np.arange(smin, smax, sinc)[np.newaxis]
        nx = ny = len(Sx[0])
        Sy = np.fliplr(Sy)

        #####Convert start from Gregorian to actual date###############
        Time = DT
        Time = Time - int(Time)
        d = date.fromordinal(int(DT))
        date1 = d.isoformat()
        H = (Time * 24)
        H1 = int(H)  # hours
        minutes = (H - int(H)) * 60
        minutes1 = int(minutes)
        seconds = (minutes - int(minutes)) * 60
        H1 = str(H1).zfill(2)
        minutes1 = str(minutes1).zfill(2)
        seconds = "%.2f" % seconds
        seconds = str(seconds).zfill(2)
        DATE = date1 + "T" + str(H1) + minutes1 + seconds
        t1 = UTCDateTime(DATE)
        ########End conversion###############################

        st.trim(starttime=t1, endtime=t1 + win_len)
        st.sort()
        n = len(st)
        for i in range(n):
            coords = inv.get_coordinates(st[i].id)
            st[i].stats.coordinates = AttribDict({
                'latitude': coords['latitude'],
                'elevation': coords['elevation'],
                'longitude': coords['longitude']
            })

        coord = get_geometry(st, coordsys='lonlat', return_center=True)

        tr = st[0]
        win = len(tr.data)
        if (win % 2) == 0:
            nfft = win / 2 + 1
        else:
            nfft = (win + 1) / 2

        nr = st.count()  # number of stations
        delta = st[0].stats.delta
        fs = 1 / delta
        fn = fs / 2
        freq = np.arange(0, fn, fn / nfft)
        value1, freq1 = find_nearest(freq, linf)
        value2, freq2 = find_nearest(freq, lsup)
        df = value2 - value1
        m = np.zeros((win, nr))

        WW = np.hamming(int(win))
        WW = np.transpose(WW)
        for i in range(nr):
            tr = st[i]
            if method == "FK":
                m[:, i] = (tr.data - np.mean(tr.data)) * WW
            else:
                m[:, i] = (tr.data - np.mean(tr.data))
        pdata = np.transpose(m)

        #####Coherence######
        NW = 2  # the time-bandwidth product; a good choice is 2-3
        K = 2 * NW - 1
        tapers, eigs = alg.dpss_windows(win, NW, K)
        tdata = tapers[None, :, :] * pdata[:, None, :]
        tspectra = fftpack.fft(tdata)

        w = np.empty((nr, int(K), int(nfft)))
        for i in range(nr):
            w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)

        nseq = nr
        L = int(nfft)
        #csd_mat = np.zeros((nseq, nseq, L), 'D')
        #psd_mat = np.zeros((2, nseq, nseq, L), 'd')
        coh_mat = np.zeros((nseq, nseq, L), 'd')
        #coh_var = np.zeros_like(coh_mat)
        Cx = np.ones((nr, nr, df), dtype=np.complex128)

        if method == "MTP.COHERENCE":
            for i in range(nr):
                for j in range(nr):
                    sxy = alg.mtm_cross_spectrum(tspectra[i], (tspectra[j]),
                                                 (w[i], w[j]),
                                                 sides='onesided')
                    sxx = alg.mtm_cross_spectrum(tspectra[i],
                                                 tspectra[i],
                                                 w[i],
                                                 sides='onesided')
                    syy = alg.mtm_cross_spectrum(tspectra[j],
                                                 tspectra[j],
                                                 w[j],
                                                 sides='onesided')
                    s = sxy / np.sqrt((sxx * syy))
                    cxcohe = s[value1:value2]
                    Cx[i, j, :] = cxcohe

        # Calculates Conventional FK-power
        if method == "FK":
            for i in range(nr):
                for j in range(nr):
                    A = np.fft.rfft(m[:, i])
                    B = np.fft.rfft(m[:, j])
                    #Relative Power
                    den = np.absolute(A) * np.absolute(np.conjugate(B))
                    out = (A * np.conjugate(B)) / den
                    cxcohe = out[value1:value2]
                    Cx[i, j, :] = cxcohe

        r = np.zeros((nr, 2), dtype=np.complex128)
        S = np.zeros((1, 2), dtype=np.complex128)
        Pow = np.zeros((len(Sx[0]), len(Sy[0]), df))
        for n in range(nr):
            r[n, :] = coord[n][0:2]

        freq = freq[value1:value2]

        for i in range(ny):
            for j in range(nx):
                S[0, 0] = Sx[0][j]
                S[0, 1] = Sy[0][i]
                k = (S * r)
                K = np.sum(k, axis=1)
                n = 0
                for f in freq:
                    A = np.exp(-1j * 2 * pi * f * K)
                    B = np.conjugate(np.transpose(A))
                    D = np.matmul(B, Cx[:, :, n]) / nr
                    P = np.matmul(D, A) / nr
                    Pow[i, j, n] = np.abs(P)
                    n = n + 1
        Pow = np.mean(Pow, axis=2)
        #Pow = Pow / len(freq)
        Pow = np.fliplr(Pow)
        x = y = np.linspace(smin, smax, nx)

        nn = len(x)
        maximum_power = np.where(Pow == np.amax(Pow))
        Sxpow = (maximum_power[1] - nn / 2) * sinc
        Sypow = (maximum_power[0] - nn / 2) * sinc

        return Pow, Sxpow, Sypow, coord
    stn = obspy.read(
        'https://raw.github.com/ashimrijal/NoiseCorrelation/master/data/noise.CI.MLAC.LHZ.2004.294.2005.017.mseed'
    )
    # get noise data for the station PHL and add it to the previous stream
    stn += obspy.read(
        'https://raw.github.com/ashimrijal/NoiseCorrelation/master/data/noise.CI.PHL.LHZ.2004.294.2005.017.mseed'
    )
    # if you have data stored locally, comment the stn = and stn += lines above
    # then uncomment the following 3 lines and adapt the path:
    # stn = obspy.read('./noise.CI.MLAC.LHZ.2004.294.2005.017.mseed')
    # stn += obspy.read('noise.CI.PHL.LHZ.2004.294.2005.017.mseed')
    # ste = obspy.read('event.CI.PHL.LHZ.1998.196.1998.196.mseed')
else:
    # download data from IRIS database
    client = Client("IRIS")  # client specification
    t1 = UTCDateTime("2004-10-20T00:00:00.230799Z")  # start UTC date/time
    t2 = t1 + (num_of_days * 86400)  # end UTC date/time
    stn = client.get_waveforms(network="CI",
                               station="MLAC",
                               location="*",
                               channel="*",
                               starttime=t1,
                               endtime=t2)  # get data for MLAC
    stn += client.get_waveforms(
        network="CI",
        station="PHL",
        location="*",
        channel="*",
        starttime=t1,
        endtime=t2)  # get data for PHL and add it to the previous stream
# -
Example no. 18
def swGetSacReferTime(kzdate, kztime):
    date = kzdate.replace('/', '-')
    eventtime = UTCDateTime(date + 'T' + kztime)

    return eventtime
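
# Hedged usage note (illustrative values): with kzdate like '2016/07/14' and
# kztime like '01:23:45.600', the call returns
# UTCDateTime('2016-07-14T01:23:45.600000Z'):
# swGetSacReferTime('2016/07/14', '01:23:45.600')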
Example no. 19
#newdf.insert(newdf.shape[1], "K_p{0:02d}h".format(int(lookforward/60)), pd.Series(np.array(future_K_p)))

#lookforward = 180*3 #In integer minutes
#future_K_p = newdf['K_p'].values
#future_K_p = np.roll(future_K_p, -lookforward)
#newdf.insert(newdf.shape[1], "K_p{0:02d}h".format(int(lookforward/60)), pd.Series(np.array(future_K_p)))

#lookforward = 180*4 #In integer minutes
#future_K_p = newdf['K_p'].values
#future_K_p = np.roll(future_K_p, -lookforward)
#newdf.insert(newdf.shape[1], "K_p{0:02d}h".format(int(lookforward/60)), pd.Series(np.array(future_K_p)))

#Add column for time of day
timeofday = np.zeros(newdf.shape[0])
for i in range(newdf.shape[0]):
    t = UTCDateTime(newdf['Date'].values[i])
    timeofday[i] = t.timestamp - (UTCDateTime(t.year, t.month, t.day)).timestamp
    
newdf.insert(newdf.shape[1], 'TimeOfDay', pd.Series(np.array(timeofday)))
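
# Worked example (illustrative): for a row stamped 2015-03-02T06:30:00 UTC,
# t.timestamp - UTCDateTime(2015, 3, 2).timestamp == 23400.0, i.e. 06:30
# expressed as seconds since midnight.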


# In[151]:




# In[73]:

newdf.dropna(how='any',axis=0,inplace=True)
newdf.values.max()
#newdf.dropna(subset=['Year'])
Example no. 20
 def origin(self):
     self.o = UTCDateTime(self.date + 'T' + self.time).timestamp
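# Note: .timestamp converts to seconds since the POSIX epoch, e.g.
# UTCDateTime('1970-01-01T00:00:10').timestamp == 10.0.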
Example no. 21
                 color=viridis(1.0 * n / N))
        n = n + 2
    plt.xlabel('Period [s]')
    plt.ylabel('dB relative to unfiltered')
    plt.suptitle(stat.code + ' m' + str(mag) + ' dist=' + str(Edist) + 'km')
    plt.show()


model = TauPyModel(model="iasp91")
client = Client("IRIS")

# stuff to define for the FilterPicker
Tlong = 30  # a time averaging scale in seconds
domper = 20  # dominant period that you want to pick up to

starttime = UTCDateTime("2018-08-01")
endtime = UTCDateTime("2019-08-01")
# coordinates and radius of study area
lat = 34.9
lon = -106.5
rad = 1.5  # in degrees
#max sensor to event distance to analyze
max_epi_dist = 250
#minimum magnitude to analyze
minmag = 2.2

stas = "*"
nets = "IU"
chans = "HHZ,BHZ"

debug = True
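
# A hedged sketch of the catalog request these parameters imply; the variable
# name `cat` is an assumption, the keyword arguments are standard FDSN ones.
cat = client.get_events(starttime=starttime, endtime=endtime,
                        latitude=lat, longitude=lon, maxradius=rad,
                        minmagnitude=minmag)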
Example no. 22
 def roundOrigin(self):  # 1970-02-09T00:12:34.999990Z
     origin = UTCDateTime(self.date + 'T' + self.time)
     origin = UTCDateTime(round(origin))
     self.date = origin.strftime("%Y-%m-%d")
     self.time = origin.strftime("%H:%M:%S")
     self.o = origin
Example no. 23
            # will be raised here
            conn = psycopg2.connect(conn_string)
            to_close = True
        except Exception as e:
            print("DB Connection Error %s" % e)
            return None

    kwargs = {}

    if starttime is None:
        endtime = None
    else:
        if not isinstance(starttime, UTCDateTime):
            if isinstance(starttime, str):
                try:
                    starttime = UTCDateTime(starttime)
                    endtime = starttime + time_interval
                except ValueError:
                    starttime = endtime = None
            else:
                starttime = endtime = None
        else:
            endtime = starttime + time_interval

    kwargs['starttime'] = starttime
    kwargs['endtime'] = endtime
    kwargs['nearest_sample'] = True

    query_sql = """SELECT network, station, location, channel, file, path
    FROM default_waveform_channels
    INNER JOIN default_waveform_files ON
Example no. 24
 def originfmt(self):
     o = UTCDateTime(self.date + "T" + self.time)
     o = UTCDateTime(round(o, 3))
     self.date = o.strftime("%Y-%m-%d")
     self.time = o.strftime("%H:%M:%S.%f")[0:12]
Example no. 25
def wpinv(
    st: Stream,
    metadata: dict,
    event: Event,
    gfdir: str,
    OL: int = 1,
    processes: int = None,
):
    """
    This function is the main function that will compute the inversion. For
    details of the algorithm and logic, see `here
    <https://pubs.er.usgs.gov/publication/70045140>`_.

    :param st: The waveform data to be used as raw input
    :type st: :py:class:`obspy.core.stream.Stream`

    :param dict metadata: Station metadata. Each key is a station ID and
        the values are a dictionaries with the metadata. Each dictionary
        should look like:

        .. code-block:: python

            {'azimuth': 0.0,
                'dip': -90.0,
                'elevation': 19.0,
                'gain': 59680600.0,
                'latitude': 2.0448,
                'longitude': -157.4457,
                'poles': [
                    (-0.035647-0.036879j),
                    (-0.035647+0.036879j),
                    (-251.33+0j),
                    (-131.04-467.29j),
                    (-131.04+467.29j)],
                'sensitivity': 33554000000.0,
                'transfer_function': 'A',
                'zeros': [0j, 0j]}

    :param event: The preliminary event information; its latitude, longitude,
        depth and origin time are used.

    :param int OL: Output level of the inversion. 1 computes just the preliminary
        magnitude, 2 performs an inversion using the PDE location, and 3
        uses an optimized centroid location.

    :return: What is returned depends on the processing level (*OL*), as described below.

        - OL = 1, a tuple with elements:
            0. Preliminary Mw magnitude of the event.
            #. List with the stations contributing to the preliminary
                magnitude. (epicentral order)
            #. List with the peak-to-peak amplitude (meters) of each station
                contributing to the preliminary magnitude (epicentral order).

        - OL = 2, a tuple with elements:
            0. Six component of the moment tensor in Nm,
                ['RR', 'PP', 'TT', 'TP' , 'RT', 'RP'] (ready to be plotted with
                Beachball from ObsPy).
            #. Array with the concatenated traces of observed displacements
                sorted according to epicentral distance.
            #. Array with the concatenated traces of synthetic seismograms
                sorted according to epicentral distance.
            #. List with the stations contributing to the final solution,
                sorted according to epicentral distance.
            #. List with the lengths of each trace in trlist. Note that
                sum(trlen) = len(observed_displacements) = len(syn).

        - OL = 3, Same as OL2 plus:
            5. Optimal centroid location (lat, lon, dep).
            #. Time delay/Half duration of the source (secs).
            #. latitude-longitude grid used to find the optimal centroid
                location
            #. Inversion result for each grid point in latlons
            #. Dictionary with relevant information about the data processing
                so it can be redone easily outside this function.
    """
    #############Output Level 1#############################
    #############Preliminary magnitude:#####################

    # Ta and Tb give the corner periods in seconds of the band pass filter
    # used at this stage of processing.
    Ta = 200.
    Tb = 1000.

    # vector of periods to consider in the instrument response fitting.
    T = np.linspace(Ta,Tb,500)
    freq = 1./T

    # convert to angular frequency
    omega = freq*2.*np.pi

    # create a function to compute the amplitude of the response from poles and
    # zeros.
    Vpaz2freq = np.vectorize(paz_2_amplitude_value_of_freq_resp)

    hyplat = event.latitude
    hyplon = event.longitude
    hypdep = event.depth
    orig = UTCDateTime(event.time)

    # Deal with extremely shallow preliminary hypocenter
    if hypdep < 10.:
        hypdep = 10.

    # In case of multiple location codes we favor '00'. This may be improved
    # by selecting the one with the longer-period sensor.
    st_sel = st.select(location = '00')
    st_sel+= st.select(location = '--')
    st_sel+= st.select(location = '') #Check this.

    #st_sel = st_sel.select(component = 'Z')
    #We also want to select stations with one location code which is not the
    #default (see above).

    logger.info('Initial number of traces: using %d with default locations (of %d total)', len(st_sel), len(st))

    # rotate the horizontal components to geographical north or east
    st_sel = rot_12_NE(st_sel, metadata)

    # traces to use the preliminary mag calculation
    st_sel_prem_mag = st_sel.select(component = 'Z').copy()

    # will contain distances epicenter - station
    DIST = np.array([])

    # trlist  contains the station IDs. If a station is then rejected
    # it must be removed from this list.
    trlist = [tr.id for tr in st_sel]
    trlist_pre = []

    # Peak to peak amplitude of each trace.
    tr_p2p = []

    # List with the station azimuths
    AZI = []

    # Instrument deconvolution and bandpass filtering:
    i = 0
    for tr in st_sel_prem_mag:
        trmeta =  metadata[tr.id]
        trlat = trmeta['latitude']
        trlon = trmeta['longitude']
        tr.data = np.array(tr.data, dtype=float)

        # compute distance in degrees between 2 locations
        dist = locations2degrees(hyplat, hyplon, trlat, trlon)

        # compute azimuth from north
        azi = gps2dist_azimuth(hyplat, hyplon, trlat, trlon)[1]

        # if p-arrival time is not calculated, then calculate it.
        # WARNING: this can be very slow in new versions of obspy.
        t_p = trmeta.get('ptime')
        if not t_p:
            from obspy.taup.taup import getTravelTimes
            t_p =  getTravelTimes(dist,hypdep)[0]['time']

        # sample period in seconds
        dt = tr.stats.delta

        # Wphase (UTC) time window
        t1 = orig + t_p
        t2 = t1 + dist*settings.WPHASE_CUTOFF

        # accounting for the units of the transfer function in the instrument response... see README.
        if trmeta['transfer_function'] == "B":
            AmpfromPAZ  = Vpaz2freq(trmeta,freq/2./np.pi)  # hz to rad*hz
        elif trmeta['transfer_function'] == "A":
            AmpfromPAZ  = Vpaz2freq(trmeta,freq)
        else:
            logger.warning("Unknown transfer function. Skipping %s", tr.id)
            trlist.remove(tr.id)
            continue

        # Fitting the instrument response and getting coefficients
        response_coefficients = fit_instrument_response(
            trmeta['sensitivity'], freq, AmpfromPAZ
        )
        if response_coefficients is None:
            logger.warning("Impossible to get Coeff. Skipping %s", tr.id)
            trlist.remove(tr.id)
            continue
        else:
            om0, h, G = response_coefficients

        AmpfromCOEFF= np.abs(omega*omega / \
                (omega*omega + 2j*h*om0*omega - om0*om0))

        # L2 norm:
        misfit = 100*np.linalg.norm(AmpfromPAZ-AmpfromCOEFF) \
                / np.linalg.norm(AmpfromPAZ)

        if misfit > settings.RESPONSE_MISFIT_TOL:
            logger.warning('Bad fitting for response (misfit=%e). Skipping %s', misfit, tr.id)
            # Remove the trace ID so it is not reused later, as in the other rejection branches.
            trlist.remove(tr.id)
            continue

        # tr.data will contain the deconvolved and filtered displacements.
        try:
            tr.data, coeff = RTdeconv(
                tr,
                om0,
                h,
                G,
                dt,
                corners=4,
                baselinelen=60./dt,
                taperlen=10.,
                fmin=1./Tb,
                fmax=1./Ta,
                get_coef=True)

        except RTdeconvError as e:
            logger.warning("Error deconvolving trace %s: %s", tr.id, str(e))
            trlist.remove(tr.id)
            continue

        # trim to the Wphase time window
        tr.trim(t1,t2)
        if len(tr)== 0:
            logger.warning("Empty trace. Skipping %s", tr.id)
            trlist.remove(tr.id)
            continue

        trlist_pre.append(tr.id)
        tr_p2p.append(tr[:].max()-tr[:].min())
        AZI.append(azi)
        DIST = np.append(DIST, dist)
        i += 1

    # Sorting the IDs according to their distance to the source:
    sorted_indices = np.argsort(DIST)
    trlist_pre = [trlist_pre[i] for i in sorted_indices]
    tr_p2p = [tr_p2p[i] for i in sorted_indices]
    AZI = [AZI[i] for i in sorted_indices]
    DIST = np.sort(DIST)

    # Median rejection
    median_p2p = np.nanmedian(tr_p2p)
    mrcoeff = settings.MEDIAN_REJECTION_COEFF
    p2pmax, p2pmin = mrcoeff[1]*median_p2p, mrcoeff[0]*median_p2p
    accepted_traces = []
    for i, (trid, p2p) in enumerate(zip(trlist_pre, tr_p2p)):
        if np.isnan(p2p):
            logger.warning("P2P Amp for %s is NaN! Excluding.", trid)
        elif p2p > p2pmax:
            logger.info("P2P Amp for %s is too big (%.2e > %.2e). Excluding.", trid, p2p, p2pmax)
        elif p2p < p2pmin:
            logger.info("P2P Amp for %s is too small (%.2e < %.2e). Excluding.", trid, p2p, p2pmin)
        else:
            accepted_traces.append(i)

    gap = azimuthal_gap(AZI)
    if gap > settings.MAXIMUM_AZIMUTHAL_GAP:
        raise InversionError("Lack of azimuthal coverage (%.0f° > %.0f°). Aborting."
                             % (gap, settings.settings.MAXIMUM_AZIMUTHAL_GAP))
    if len(accepted_traces) < settings.MINIMUM_STATIONS:
        raise InversionError("Lack of stations (%d < %d). Aborting."
                             % (len(accepted_traces), settings.MINIMUM_STATIONS))

    logger.info("Traces accepted for preliminary magnitude calculation: {}"
                .format(len(accepted_traces)))

    # Get the preliminary mag:
    tr_p2p_con = [tr_p2p[i] for i in accepted_traces]
    DIST_con   = [DIST[i] for i in accepted_traces]
    AZI_con    = [AZI[i] for i in accepted_traces]
    trlist_pre_con = [trlist_pre[i] for i in accepted_traces]
    pre_results = preliminary_magnitude(tr_p2p_con, DIST_con, AZI_con, trlist_pre_con)

    pre_wp_mag = pre_results['magnitude']
    t_h = pre_results['t_h']
    logger.info("Preliminary t_h = %.2f" % t_h)

    logger.info("OL1:")
    logger.info("Average amplitude:  %.1em", pre_results['average_amplitude'])
    logger.info("Magnitude:          %.2f", pre_wp_mag)
    logger.info("Strike:             %.1f°", pre_results['strike'])
    logger.info("Eccentricity (b/a): %.2f", pre_results['eccentricity'])

    ol1 = OL1Result(
        preliminary_calc_details=pre_results,
        used_traces=trlist,
        nstations=len(accepted_traces),
        magnitude=round(pre_wp_mag, 1),
    )
    result = WPhaseResult(OL1=ol1, Event=event)
    if OL == 1:
        return result
    #############  Output Level 2    #######################################
    #############  Moment Tensor based on preliminary hypocenter (PDE) ####
    # Much of what follows is the same as what was done above, but we will be
    # using a different set of stations and can handle multiple components per
    # station (i.e. horizontals also)
    ########################################################################

    # Redefine and define some values according to the pre_wp_mag
    Ta, Tb = get_corner_freqs_from_mag(pre_wp_mag)
    logger.info("Filter corner periods: %.1f, %1f" % (Ta, Tb))
    T = np.linspace(Ta,Tb,500)
    freq = 1./T
    omega = freq*2.*np.pi

    # Build the Moment Rate Function (MRF):
    greens = GreensFunctions(gfdir)
    dt = greens.delta
    MRF = MomentRateFunction(t_h, dt)
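    # (MomentRateFunction is not shown in this excerpt; presumably it builds a
    # simple source-time function of half duration t_h -- commonly a triangle --
    # sampled at the Green's function interval dt.)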

    # Building a stream with the synthetics and the observed displacements vector
    tr_p2p = [] #Peak to peak amplitude of each trace.

    # Preparing the data:
    DIST = []
    DATA_INFO = {} #Minimum info to be able to filter the displacements afterwards
    for itr, trid in enumerate(trlist[:]):
        trmeta =  metadata[trid]
        trlat = trmeta['latitude']
        trlon = trmeta['longitude']
        dist = locations2degrees(hyplat, hyplon, trlat, trlon)
        t_p = trmeta.get('ptime')
        if not t_p:
            from obspy.taup.taup import getTravelTimes
            t_p =  getTravelTimes(dist,hypdep)[0]['time']
        # Select the trace first so that dt refers to this trace, not to a
        # leftover variable from the previous loop.
        tr = st_sel.select(id = trid)[0]
        dt = tr.stats.delta

        # W-phase time window
        t1 = orig + t_p
        t2 = t1 + dist*settings.WPHASE_CUTOFF

        tr.data = np.array(tr.data, dtype=float)
        tf = trmeta['transfer_function']
        if tf == "B":
            AmpfromPAZ  = Vpaz2freq(trmeta,freq/2./np.pi)  # hz to rad*hz
        elif tf == "A":
            AmpfromPAZ  = Vpaz2freq(trmeta,freq)
        else:
            logger.warning("%s: Unknown transfer function %s. Skipping this trace", tr.id, tf)
            trlist.remove(tr.id)
            continue

        response_coefficients = fit_instrument_response(
            trmeta['sensitivity'], freq, AmpfromPAZ
        )
        if response_coefficients is None:
            logger.warning("%s: Could not fit instrument response. Skipping this trace", tr.id)
            trlist.remove(tr.id)
            continue
        else:
            om0, h, G = response_coefficients

        DATA_INFO[tr.id] = [om0, h, G, dt, t1, t2]

        try:
            tr.data, coeff = RTdeconv(
                tr, om0, h, G, dt,
                corners=4,
                baselinelen=60./dt,
                taperlen= 10.,
                fmin = 1./Tb,
                fmax = 1./Ta,
                get_coef = True)

        except RTdeconvError as e:
            logger.warning("%s: Skipping due to error in deconvolution: %s", tr.id, str(e))
            trlist.remove(tr.id)
            continue

        #Check length of the trace:
        tr.trim(t1, t2)
        if len(tr) == 0:
            logger.warning("%s: Trace is empty. Skipping this trace", tr.id)
            trlist.remove(tr.id)
            continue
        tr_p2p.append(tr[:].max() - tr[:].min())

        DIST.append(dist)

    DIST = np.array(DIST)
    sorted_indices = np.argsort(DIST)
    trlist = [trlist[i] for i in sorted_indices]
    tr_p2p = [tr_p2p[i] for i in sorted_indices]

    # Rejecting outliers:
    observed_displacements = np.array([]) # observed displacements vector.
    trlen = [] # A list with the length of each station data.
    trlist2 = trlist[:]

    mrcoeff = settings.MEDIAN_REJECTION_COEFF
    median_AMP = np.median(tr_p2p)
    for i, amp in enumerate(tr_p2p):
        if (amp > median_AMP*mrcoeff[1]
        or amp < median_AMP*mrcoeff[0]):
            trlist2.remove(trlist[i])
            logger.warning("%s: Amplitude is %.2fx the median, which is "
                           "outside our acceptable range of [%.2f, %.2f]. "
                           "Rejecting this trace.",
                           trlist[i],
                           amp/median_AMP,
                           mrcoeff[0],
                           mrcoeff[1])

        else:
            tr = st_sel.select(id = trlist[i])[0]
            observed_displacements =  np.append(observed_displacements, tr.data[:],0)
            trlen.append(len(tr))
    trlist = trlist2[:]

    logger.info('number of traces for OL2: %d', len(trlist))

    #### Inversion:
    # Search for the optimal time delay t_d.
    # TODO: find a better way to do this; the map over candidate delays is slow.

    # time delays for which we will run inversions (finally choosing the one with
    # lowest misfit)
    time_delays = np.arange(1., settings.MAXIMUM_TIME_DELAY)

    # extract the greens function matrix for all time delays. Note that this does not
    # perform an inversion, because OnlyGetFullGF=True.
    GFmatrix = core_inversion(
        0,
        (hyplat, hyplon, hypdep),
        (Ta, Tb),
        MRF,
        observed_displacements,
        trlen,
        metadata,
        trlist,
        gfdir=gfdir,
        OnlyGetFullGF=True,
        max_t_d=settings.MAXIMUM_TIME_DELAY,
    )

    # inputs for the TIME DELAY search
    inputs = [(t_d_test, GFmatrix, trlen, observed_displacements, settings.MAXIMUM_TIME_DELAY) for t_d_test in time_delays]
    with ProcessPoolExecutor(max_workers=processes) as pool:
        misfits = list(pool.map(get_timedelay_misfit_wrapper,inputs))

    # Set t_d (time delay) and t_h (half duration) to optimal values:
    mis_min = int(np.array(misfits).argmin())
    result.misfits = TimeDelayMisfits(array=misfits, min=mis_min)
    t_d = t_h = time_delays[mis_min]
    MRF = MomentRateFunction(t_h, dt)
    logger.info("Source time function, time delay: %d, %f", len(MRF), t_d)
    logger.info("revised t_h = %.2f" % t_h)

    #### Removing individual bad fits. This iteratively removes stations with misfits
    # outside of the acceptable range defined by the setting MISFIT_TOL_SEQUENCE.
    M, misfit, GFmatrix = core_inversion(
        t_d,
        (hyplat, hyplon, hypdep),
        (Ta, Tb),
        MRF,
        observed_displacements,
        trlen,
        metadata,
        trlist,
        gfdir=gfdir,
        return_gfs=True)

    # Remove bad traces
    for tol in settings.MISFIT_TOL_SEQUENCE:
        # Make sure there are enough channels
        GFmatrix, observed_displacements, trlist, trlen = remove_individual_traces(
            tol,
            M,
            GFmatrix,
            observed_displacements,
            trlist,
            trlen)

        if len(trlist) < settings.MINIMUM_FITTING_CHANNELS:
            msg = "Only {} channels with possibly acceptable fits. Aborting.".format(len(trlist))
            logger.error(msg)
            raise InversionError(msg)

        M, misfit, GFmatrix = core_inversion(
            t_d,
            (hyplat, hyplon, hypdep),
            (Ta,Tb),
            MRF,
            observed_displacements,
            trlen,
            metadata,
            trlist,
            gfdir=gfdir,
            return_gfs=True
        )

    syn = (M[0]*GFmatrix[:,0] + M[1]*GFmatrix[:,1] +
           M[3]*GFmatrix[:,2] + M[4]*GFmatrix[:,3] +
           M[5]*GFmatrix[:,4])
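    # Only five of the six Green's-function columns enter the reconstruction;
    # presumably the inversion is deviatoric (zero-trace moment tensor), so one
    # diagonal component of M is not independent.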


    trace_lengths = list(zip(trlist, trlen))
    result.OL2 = make_result(
        OL2Result,
        M,
        misfit=misfit,
        depth=hypdep,
        time_delay=t_d,
        used_traces=trlist,
        moment_tensor=M,
        observed_displacements=observed_displacements,
        synthetic_displacements=syn,
        trace_lengths=OrderedDict(trace_lengths),
    )

    if len(trlen) == 0:
        logger.warning("Could not calculate OL3: no data within tolerance")
        return result
    elif OL == 2:
        return result

    #############  Output Level 3  ##############################
    ###  Moment Tensor based on a grid search  ##################
    ###  for the optimal centroid location  #####################

    logger.info("Performing grid search for best centroid location.")
    lat_grid, lon_grid = get_latlon_for_grid(hyplat, hyplon, dist_lat=3.0,
                                             dist_lon=3.0, delta=0.8)
    logger.debug("Grid size: %d * %d", len(lon_grid), len(lat_grid))
    latlons = [(lat, lon) for lat in lat_grid for lon in lon_grid]

    grid_search_inputs = [
        (t_d, (lat, lon, hypdep), (Ta, Tb), MRF,
         observed_displacements, trlen, metadata, trlist, greens)
        for lat, lon in latlons
    ]

    i_grid, _, _, grid_search_results = minimize_misfit(
        grid_search_inputs,
        processes=processes
    )
    cenlat, cenlon = latlons[i_grid]

    logger.info("Performing grid search for best depth.")
    deps_grid = get_depths_for_grid(hypdep, greens)
    logger.debug("Depth grid size: %d", len(deps_grid))

    depth_search_inputs = [
        (t_d, (cenlat, cenlon, depth), (Ta,Tb),
         MRF, observed_displacements, trlen, metadata, trlist, greens)
        for depth in deps_grid
    ]

    i_dep, _, _, _ = minimize_misfit(
        depth_search_inputs,
        processes=processes
    )
    cendep = deps_grid[i_dep]

    ### Final inversion!!
    # While we already inverted at this centroid location during the depth
    # search, we threw away the corresponding synthetic waveforms; so we need
    # to re-run the inversion to get them.

    M, misfit, GFmatrix = core_inversion(t_d, (cenlat, cenlon, cendep),
                                         (Ta,Tb), MRF, observed_displacements,
                                         trlen, metadata, trlist, gfdir=gfdir,
                                         return_gfs=True)

    syn = (M[0]*GFmatrix[:,0] + M[1]*GFmatrix[:,1]
          + M[3]*GFmatrix[:,2] + M[4]*GFmatrix[:,3]
          + M[5]*GFmatrix[:,4])


    trace_lengths = list(zip(trlist, trlen))

    result.OL3 = make_result(
        OL3Result,
        M,
        misfit=misfit,
        depth=cendep,
        time_delay=t_d,
        centroid=(cenlat, cenlon, cendep),
        used_traces=trlist,
        moment_tensor=M,
        observed_displacements=observed_displacements,
        synthetic_displacements=syn,
        trace_lengths=OrderedDict(trace_lengths),
        grid_search_candidates=[row[1] for row in grid_search_inputs],
        grid_search_results=grid_search_results,
    )
    return result
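
# The azimuthal_gap helper used above is not included in this excerpt. A minimal
# sketch of the usual definition (largest angular gap between consecutive station
# azimuths, wrap-around included; azimuths assumed in degrees) could look like:
def azimuthal_gap_sketch(azimuths):
    """Return the largest gap, in degrees, left uncovered by station azimuths."""
    azi = np.sort(np.asarray(azimuths, dtype=float) % 360.)
    if len(azi) < 2:
        return 360.
    gaps = np.diff(azi)
    wrap = 360. - azi[-1] + azi[0]  # gap across due north
    return float(max(gaps.max(), wrap))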
Esempio n. 26
0
def ga2oat(arrfile, oatfile, stafile):
    with open(arrfile, 'r') as fp:
        lst = fp.readlines()

    with open(stafile, 'r') as fp:
        stlst = fp.readlines()

    stalst = []
    for line in stlst:
        row = line.split()
        nwnm = row[0].strip()
        stnm = row[1].strip()
        stla = float(row[2])
        stlo = float(row[3])
        stel = float(row[4])
        tmp = [nwnm, stnm, stla, stlo, stel]
        stalst.append(tmp)

    fp = open(oatfile, 'w')

    oat = Oat()

    month = {
        'January': 1,
        'February': 2,
        'March': 3,
        'April': 4,
        'May': 5,
        'June': 6,
        'July': 7,
        'August': 8,
        'September': 9,
        'October': 10,
        'November': 11,
        'December': 12
    }

    k = 0
    for line in lst:
        k = k + 1
        print(k, len(lst))
        if line.strip() == "":
            continue

        row = line.split()
        try:
            oat.mag = float(row[0])
            day = row[1]
            mon = str(month[row[2]]).zfill(2)
            year = row[3]
            oat.time = row[5]
            oat.date = year + '-' + mon + '-' + day
            oat.evla = float(row[11])
            oat.evlo = float(row[12])
            oat.evdp = float(row[13])

        except (ValueError, KeyError, IndexError):
            # Not an event header line; treat it as a phase pick line.
            if row[0] != '-':
                oat.kstnm = row[0]
            for i in range(len(row)):
                if row[i] == 'P' or row[i] == 'S':
                    break

            oat.phase = row[i]

            date = row[i + 1]
            time = row[i + 2]

            tt = date.split('/')
            date = tt[2] + '-' + tt[1] + '-' + tt[0]
            oat.tobs = UTCDateTime(date + 'T' +
                                   time) - UTCDateTime(oat.date + 'T' +
                                                       oat.time)

            for i in range(len(stalst)):
                if oat.kstnm == stalst[i][1]:
                    oat.stla = stalst[i][2]
                    oat.stlo = stalst[i][3]
                    oat.stel = stalst[i][4]
                    oat.knetwk = stalst[i][0]
                    break

            if oat.stla == -1.0:
                continue

            oat.DistAz()
            if oat.phase[0] == 'P' or oat.phase[0] == 'p':
                oat.ttak135pf(phase_list=['P', 'Pn', 'Pg', 'p'])
            elif oat.phase[0] == 'S' or oat.phase[0] == 's':
                oat.ttak135sf(phase_list=['S', 'Sn', 'Sg', 's'])

            oat.write(fp)

    fp.close()
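
# Hypothetical usage (file names are placeholders, not taken from the source):
#   ga2oat('arrivals.txt', 'picks.oat', 'stations.txt')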
Esempio n. 27
0
def preprocess(db, stations, comps, goal_day, params, responses=None):
    """
    Fetches data for each ``stations`` and each ``comps`` using the
    data_availability table in the database.

    To correct for instrument responses, make sure to set ``remove_response``
    to "Y" in the config and to provide the ``responses`` DataFrame.

    :Example:

    >>> from msnoise.api import connect, get_params, preload_instrument_responses
    >>> from msnoise.preprocessing import preprocess
    >>> db = connect()
    >>> params = get_params(db)
    >>> responses = preload_instrument_responses(db)
    >>> st = preprocess(db, ["YA.UV06","YA.UV10"], ["Z",], "2010-09-01", params, responses)
    >>> st
     2 Trace(s) in Stream:
    YA.UV06.00.HHZ | 2010-09-01T00:00:00.000000Z - 2010-09-01T23:59:59.950000Z | 20.0 Hz, 1728000 samples
    YA.UV10.00.HHZ | 2010-09-01T00:00:00.000000Z - 2010-09-01T23:59:59.950000Z | 20.0 Hz, 1728000 samples

    :type db: :class:`sqlalchemy.orm.session.Session`
    :param db: A :class:`~sqlalchemy.orm.session.Session` object, as
        obtained by :func:`msnoise.api.connect`.
    :type stations: list of str
    :param stations: a list of station names, in the format NET.STA.
    :type comps: list of str
    :param comps: a list of component names, in Z,N,E,1,2.
    :type goal_day: str
    :param goal_day: the day of data to load, ISO 8601 format: e.g. 2016-12-31.
    :type params: class
    :param params: an object containing the config parameters, as obtained by
        :func:`msnoise.api.get_params`.
    :type responses: :class:`pandas.DataFrame`
    :param responses: a DataFrame containing the instrument responses, as
        obtained by :func:`msnoise.api.preload_instrument_responses`.
    :rtype: :class:`obspy.core.stream.Stream`
    :return: A Stream object containing all traces.
    """
    datafiles = {}
    output = Stream()
    MULTIPLEX = False
    MULTIPLEX_files = {}
    for station in stations:
        datafiles[station] = {}
        net, sta, loc = station.split('.')
        gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
        files = get_data_availability(db,
                                      net=net,
                                      sta=sta,
                                      loc=loc,
                                      starttime=gd,
                                      endtime=gd)
        for comp in comps:
            datafiles[station][comp] = []
        for file in files:
            if file.sta != "MULTIPLEX":
                if file.chan[-1] not in comps:
                    continue
                fullpath = os.path.join(file.path, file.file)
                datafiles[station][file.chan[-1]].append(fullpath)
            else:
                MULTIPLEX = True
                print("Mutliplex mode, reading the files")
                fullpath = os.path.join(file.path, file.file)
                multiplexed = sorted(glob.glob(fullpath))
                for comp in comps:
                    for fn in multiplexed:
                        if fn in MULTIPLEX_files:
                            _ = MULTIPLEX_files[fn]
                        else:
                            # print("Reading %s" % fn)
                            _ = read(fn, format=params.archive_format or None)
                            traces = []
                            for tr in _:
                                if "%s.%s" % (
                                        tr.stats.network, tr.stats.station
                                ) in stations and tr.stats.channel[-1] in comps:
                                    traces.append(tr)
                            del _
                            _ = Stream(traces=traces)
                            MULTIPLEX_files[fn] = _
                        datafiles[station][comp].append(_)

    for istation, station in enumerate(stations):
        net, sta, loc = station.split(".")
        for comp in comps:
            files = datafiles[station][comp]
            if len(files) != 0:
                logger.debug("%s.%s Reading %i Files" %
                             (station, comp, len(files)))
                traces = []
                for file in files:
                    if isinstance(file, Stream):
                        st = file.select(network=net,
                                         station=sta,
                                         component=comp).copy()
                    else:
                        try:
                            # print("Reading %s" % file)
                            # t=  time.time()
                            st = read(file,
                                      dtype=np.float64,
                                      starttime=UTCDateTime(gd),
                                      endtime=UTCDateTime(gd) + 86400,
                                      station=sta,
                                      format=params.archive_format or None)
                            # print("done in", time.time()-t)
                        except:
                            logger.debug("ERROR reading file %s" % file)
                            # TODO add traceback (optional?)
                            continue
                    for tr in st:
                        if len(tr.stats.channel) == 2:
                            tr.stats.channel += tr.stats.location
                            tr.stats.location = "00"
                    tmp = st.select(network=net, station=sta, component=comp)
                    if not len(tmp):
                        for tr in st:
                            tr.stats.network = net
                        st = st.select(network=net,
                                       station=sta,
                                       component=comp)
                    else:
                        st = tmp
                    for tr in st:
                        tr.data = tr.data.astype(np.float64)
                        tr.stats.network = tr.stats.network.upper()
                        tr.stats.station = tr.stats.station.upper()
                        tr.stats.channel = tr.stats.channel.upper()

                        traces.append(tr)
                    del st
                stream = Stream(traces=traces)
                if not (len(stream)):
                    continue
                f = io.BytesIO()
                stream.write(f, format='MSEED')
                f.seek(0)
                stream = read(f, format="MSEED")

                stream.sort()
                # try:
                #     # HACK not super clean... should find a way to prevent the
                #     # same trace id with different sps to occur
                #     stream.merge(method=1, interpolation_samples=3, fill_value=None)
                # except:
                #     logger.debug("Error while merging...")
                #     traceback.print_exc()
                #     continue
                # stream = stream.split()
                if not len(stream):
                    continue
                logger.debug("%s Checking sample alignment" % stream[0].id)
                for i, trace in enumerate(stream):
                    stream[i] = check_and_phase_shift(
                        trace, params.preprocess_taper_length)

                logger.debug("%s Checking Gaps" % stream[0].id)
                if len(getGaps(stream)) > 0:
                    max_gap = params.preprocess_max_gap * stream[
                        0].stats.sampling_rate

                    gaps = getGaps(stream)
                    while len(gaps):
                        too_long = 0
                        for gap in gaps:
                            if int(gap[-1]) <= max_gap:
                                try:
                                    stream[gap[0]] = stream[gap[0]].__add__(
                                        stream[gap[1]],
                                        method=1,
                                        fill_value="interpolate")
                                    stream.remove(stream[gap[1]])
                                except:
                                    stream.remove(stream[gap[1]])

                                break
                            else:
                                too_long += 1

                        if too_long == len(gaps):
                            break
                        gaps = getGaps(stream)
                    del gaps

                stream = stream.split()
                for tr in stream:
                    if tr.stats.sampling_rate < (params.goal_sampling_rate -
                                                 1):
                        stream.remove(tr)
                taper_length = params.preprocess_taper_length  # seconds
                for trace in stream:
                    if trace.stats.npts < (4 * taper_length *
                                           trace.stats.sampling_rate):
                        stream.remove(trace)
                    else:
                        trace.detrend(type="demean")
                        trace.detrend(type="linear")
                        trace.taper(max_percentage=None,
                                    max_length=taper_length)

                if not len(stream):
                    logger.debug("%s.%s has only too-short traces, skipping..." % (station, comp))
                    continue

                for trace in stream:
                    logger.debug("%s Highpass at %.2f Hz" %
                                 (trace.id, params.preprocess_highpass))
                    trace.filter("highpass",
                                 freq=params.preprocess_highpass,
                                 zerophase=True,
                                 corners=4)

                    if trace.stats.sampling_rate != params.goal_sampling_rate:
                        logger.debug("%s Lowpass at %.2f Hz" %
                                     (trace.id, params.preprocess_lowpass))
                        trace.filter("lowpass",
                                     freq=params.preprocess_lowpass,
                                     zerophase=True,
                                     corners=8)

                        if params.resampling_method == "Resample":
                            logger.debug("%s Downsample to %.1f Hz" %
                                         (trace.id, params.goal_sampling_rate))
                            trace.data = resample(
                                trace.data, params.goal_sampling_rate /
                                trace.stats.sampling_rate, 'sinc_fastest')

                        elif params.resampling_method == "Decimate":
                            decimation_factor = trace.stats.sampling_rate / params.goal_sampling_rate
                            if not int(decimation_factor) == decimation_factor:
                                logger.warning(
                                    "%s CANNOT be decimated by an integer factor; consider using the Resample or Lanczos methods."
                                    " Trace sampling rate = %i Hz; desired CC sampling rate = %i Hz"
                                    % (trace.id, trace.stats.sampling_rate,
                                       params.goal_sampling_rate))
                                sys.stdout.flush()
                                sys.exit()
                            logger.debug("%s Decimate by a factor of %i" %
                                         (trace.id, decimation_factor))
                            trace.data = trace.data[::int(decimation_factor)]

                        elif params.resampling_method == "Lanczos":
                            logger.debug("%s Downsample to %.1f Hz" %
                                         (trace.id, params.goal_sampling_rate))
                            trace.data = np.array(trace.data)
                            trace.interpolate(
                                method="lanczos",
                                sampling_rate=params.goal_sampling_rate,
                                a=1.0)

                        trace.stats.sampling_rate = params.goal_sampling_rate
                    del trace

                if params.remove_response:
                    logger.debug('%s Removing instrument response' %
                                 stream[0].id)
                    try:
                        stream.attach_response(responses)
                        stream.remove_response(
                            pre_filt=params.response_prefilt, taper=False)
                    except:
                        logger.error("Bad or no instrument response "
                                     "information for %s, skipping" %
                                     stream[0].id)
                        continue

                for tr in stream:
                    tr.data = tr.data.astype(np.float32)
                    if tr.stats.location == "":
                        tr.stats.location = "--"
                output += stream
                del stream
            del files

    del MULTIPLEX_files
    return output
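
# getGaps is an msnoise helper that is not part of this excerpt. The gap-filling
# loop above only relies on each entry exposing the indices of the two adjacent
# traces (gap[0], gap[1]) and the gap length in samples (gap[-1]); a rough,
# assumed sketch of such a helper:
def get_gaps_sketch(stream):
    """Return [left_index, right_index, gap_in_samples] for same-id neighbours."""
    gaps = []
    for i in range(len(stream) - 1):
        left, right = stream[i], stream[i + 1]
        if left.id != right.id:
            continue
        nsamples = (right.stats.starttime - left.stats.endtime) * left.stats.sampling_rate
        if nsamples > 1:
            gaps.append([i, i + 1, nsamples])
    return gaps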
Esempio n. 28
0
def isc2oat(infile, oatfile, distaz=False, tt=False):

    with open(infile, "r") as fp:
        lst = fp.readlines()

    fp = open(oatfile, "w")
    oat = Oat()

    for line in lst:
        row = line.split(',')

        try:
            evid = int(row[0].strip())
        except ValueError:
            # Not a data row (e.g. a header line); skip it.
            continue

        oat.__init__()
        oat.kstnm = row[2].strip()
        oat.stla = float(row[3].strip())
        oat.stlo = float(row[4].strip())
        oat.stel = float(row[5].strip())
        oat.kcmpnm = row[6].strip()
        oat.gcarc = float(row[7].strip())
        oat.baz = float(row[8].strip())
        oat.phase = row[10].strip()
        if oat.phase == "":
            continue

        date = row[11].strip()
        time = row[12].strip()
        tobs = UTCDateTime(date + "T" + time)

        res = float(row[13].strip())

        date = row[18].strip()
        time = row[19].strip()
        orgi = UTCDateTime(date + "T" + time)

        oat.tobs = tobs - orgi
        oat.tak135 = oat.tobs - res

        oat.date = date
        oat.time = time
        oat.originfmt()

        oat.evla = float(row[20].strip())
        oat.evlo = float(row[21].strip())

        oat.dist = oat.gcarc * 111.195

        try:
            oat.evdp = float(row[22].strip())
        except (ValueError, IndexError):
            pass

        try:
            oat.mag = float(row[25].strip())
        except (ValueError, IndexError):
            continue

        if distaz:
            oat.DistAz()

        if tt:
            if oat.phase[0] == 'P' or oat.phase[0] == 'p':
                oat.ttak135pf(phase_list=['P', 'Pn', 'Pg', 'p'])
            elif oat.phase[0] == 'S' or oat.phase[0] == 's':
                oat.ttak135sf(phase_list=['S', 'Sn', 'Sg', 's'])

        oat.write(fp)

    fp.close()
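
# Hypothetical usage (the ISC arrival CSV path is a placeholder, not taken from
# the source):
#   isc2oat('isc_arrivals.csv', 'arrivals.oat', distaz=True, tt=True)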
Esempio n. 29
0
#mpl.rc('text', usetex = True)
mpl.rc('font', size=18)

times = []
ws = []
ls = []
wg = []
lg = []
sig2 = []
resi = []

fig = plt.figure(1, figsize=(12, 12))
idx = 0
for line in f:
    line = line.split(', ')
    time = UTCDateTime('19' + line[3] + '-' + line[1] + '-' + line[2] +
                       'T00:00:00')
    times.append(time.year + float(time.julday) / 365.25)
    ws = float(line[5])
    ls = float(line[6])
    wg = float(line[7])
    lg = float(line[8])
    sig2 = float(line[9])
    #resi.append(float(line[11]))
    gain = 1.
    num = [gain, 0.]
    den = [
        1., 2 * ls * ws + 2 * lg * wg,
        ws**2 + wg**2 + 4. * ls * ws * lg * wg * (1 - sig2),
        2 * ls * ws * wg**2 + 2 * lg * wg * ws**2, (ws**2) * (wg**2)
    ]
    w, h = freqs(num, den, worN=np.logspace(-3, 1, 1000))
    def trace_to_inventory(self, trace):
        # If SAC files are opened, it's useful to extract an inventory from their
        # streams so that we can populate the stations tab and the location widget.
        new_inventory = None

        # The next bit is modified from the obspy webpage on building a stationxml site from scratch
        # https://docs.obspy.org/tutorial/code_snippets/stationxml_file_from_scratch.html
        #
        # We'll first create all the various objects. These strongly follow the
        # hierarchy of StationXML files.
        # initialize the lat/lon/ele
        lat = 0.0
        lon = 0.0
        ele = -1.0

        _network = trace.stats['network']
        _station = trace.stats['station']
        _channel = trace.stats['channel']
        _location = trace.stats['location']

        # if the trace is from a sac file, the sac header might have some inventory information
        if trace.stats['_format'] == 'SAC':

            if 'stla' in trace.stats['sac']:
                lat = trace.stats['sac']['stla']

            if 'stlo' in trace.stats['sac']:
                lon = trace.stats['sac']['stlo']

            if 'stel' in trace.stats['sac']:
                ele = trace.stats['sac']['stel']
            else:
                ele = 0.333

        if _network == 'LARSA' and _station == '121':
            if _channel == 'ai0':
                lat = 35.8492497
                lon = -106.2705465
            elif _channel == 'ai1':
                lat = 35.84924682
                lon = -106.2705505
            elif _channel == 'ai2':
                lat = 35.84925165
                lon = -106.2705516

        if lat == 0.0 or lon == 0.0 or ele < 0:
            if self.fill_sta_info_dialog.exec_(_network, _station, _location,
                                               _channel, lat, lon, ele):

                edited_values = self.fill_sta_info_dialog.get_values()

                lat = edited_values['lat']
                lon = edited_values['lon']
                ele = edited_values['ele']

                _network = edited_values['net']
                _station = edited_values['sta']
                _location = edited_values['loc']
                _channel = edited_values['cha']

                # (re)populate sac headers where possible
                if trace.stats['_format'] == 'SAC':
                    trace.stats['sac']['stla'] = lat
                    trace.stats['sac']['stlo'] = lon
                    trace.stats['sac']['stel'] = ele
                    trace.stats['sac']['knetwk'] = _network
                    trace.stats['sac']['kstnm'] = _station
                # (re)populate trace stats where possible
                trace.stats['network'] = _network
                trace.stats['station'] = _station
                trace.stats['location'] = _location
                trace.stats['channel'] = _channel
        try:
            new_inventory = Inventory(
                # We'll add networks later.
                networks=[],
                # The source should be the ID of whoever created the file.
                source="InfraView")

            net = Network(
                # This is the network code according to the SEED standard.
                code=_network,
                # A list of stations. We'll add one later.
                stations=[],
                # Description isn't something that's in the trace stats or SAC header, so let's set it to the network code.
                description=_network,
                # Start and end dates are optional.

                # Start and end dates for the network are not stored in the sac header, so let's set the start date to 1/1/1900.
                start_date=UTCDateTime(1900, 1, 1))

            sta = Station(
                # This is the station code according to the SEED standard.
                code=_station,
                latitude=lat,
                longitude=lon,
                elevation=ele,
                # Creation_date is not saved in the trace stats or sac header
                creation_date=UTCDateTime(1900, 1, 1),
                # Site name is not in the trace stats or sac header, so set it to the station code.
                site=Site(name=_station))

            # This is the channel code according to the SEED standard.
            cha = Channel(
                code=_channel,
                # This is the location code according to the SEED standard.
                location_code=_location,
                # Note that these coordinates can differ from the station coordinates.
                latitude=lat,
                longitude=lon,
                elevation=ele,
                depth=0.0)

            # Now tie it all together.
            # cha.response = response
            sta.channels.append(cha)
            net.stations.append(sta)
            new_inventory.networks.append(net)

            return new_inventory

        except ValueError:
            bad_values = ""
            if lon < -180 or lon > 180:
                bad_values = bad_values + "\tlon = " + str(lon) + "\n"
            if lat < -90 or lat > 90:
                bad_values = bad_values + "\tlat = " + str(lat)
            self.errorPopup("There seems to be a value error in " + _network +
                            "." + _station + "." + _channel +
                            "\nPossible bad value(s) are:\n" + bad_values)