Code example #1
File: test_utcdatetime.py Project: avuan/obspy
 def test_weekday(self):
     """
     Tests weekday method.
     """
     dt = UTCDateTime(2008, 10, 1, 12, 30, 35, 45020)
     self.assertEqual(dt.weekday, 2)
     self.assertEqual(dt._getWeekday(), 2)
Code example #2
File: test_utcdatetime.py Project: avuan/obspy
 def test_toordinal(self):
     """
     Short test if toordinal() is working.
     Matplotlib's date2num() function depends on this, which is used a lot in
     plotting.
     """
     dt = UTCDateTime("2012-03-04T11:05:09.123456Z")
     self.assertEqual(dt.toordinal(), 734566)
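As a quick aside (not part of the project above): toordinal() on a UTCDateTime gives the same proleptic-Gregorian day count as the equivalent datetime.date, which is what the Matplotlib dependency mentioned in the docstring builds on. A minimal sketch:

from datetime import date
from obspy import UTCDateTime

dt = UTCDateTime("2012-03-04T11:05:09.123456Z")
# Both count days since 0001-01-01 in the proleptic Gregorian calendar
print(dt.toordinal())                # 734566
print(date(2012, 3, 4).toordinal())  # 734566 as well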
Code example #3
File: misfit_new.py Project: taotaokai/sem_utils
  def read_srcfrechet(self, filename=None, update=False):
    """ Read in source derivative of misfit function
        Dchi/Dxs, Dchi/Dmt
    """
    with open(filename, 'r') as f:
      lines = [ x for x in f.readlines() if not(x.startswith('#')) ]

    lines = [x.split() for x in lines]

    t0  = float(lines[0][0]);  dchi_dt0  = float(lines[0][1])
    tau = float(lines[1][0]);  dchi_dtau = float(lines[1][1])
    x   = float(lines[2][0]);  dchi_dx   = float(lines[2][1])
    y   = float(lines[3][0]);  dchi_dy   = float(lines[3][1])
    z   = float(lines[4][0]);  dchi_dz   = float(lines[4][1])
    mxx = float(lines[5][0]);  dchi_dmxx = float(lines[5][1])
    myy = float(lines[6][0]);  dchi_dmyy = float(lines[6][1])
    mzz = float(lines[7][0]);  dchi_dmzz = float(lines[7][1])
    mxy = float(lines[8][0]);  dchi_dmxy = float(lines[8][1])
    mxz = float(lines[9][0]);  dchi_dmxz = float(lines[9][1])
    myz = float(lines[10][0]); dchi_dmyz = float(lines[10][1])

    dchi_dxs = np.array([dchi_dx, dchi_dy, dchi_dz])

    dchi_dmt = np.zeros((3,3))
    dchi_dmt[0,0] = dchi_dmxx
    dchi_dmt[1,1] = dchi_dmyy
    dchi_dmt[2,2] = dchi_dmzz
    dchi_dmt[0,1] = dchi_dmxy
    dchi_dmt[1,0] = dchi_dmxy
    dchi_dmt[0,2] = dchi_dmxz
    dchi_dmt[2,0] = dchi_dmxz
    dchi_dmt[1,2] = dchi_dmyz
    dchi_dmt[2,1] = dchi_dmyz

    # check if the same as event info
    data = self.data
    event = data['event']
    #...

    # record 
    src_frechet = {
        't0':dchi_dt0,
        'tau':dchi_dtau,
        'xs':dchi_dxs,
        'mt':dchi_dmt,
        'stat': {'code':0, 'msg':"created on "+UTCDateTime.now().isoformat()}
        }

    if 'src_frechet' not in data:
      data['src_frechet'] = src_frechet
    elif update:
      data['src_frechet'].update(src_frechet)
      data['src_frechet']['stat']['code'] = 1
      data['src_frechet']['stat']['msg'] = "updated on "+UTCDateTime.now().isoformat()
    else:
      raise Exception('src_frechet already set, not updated.')
Code example #4
File: test_utcdatetime.py Project: andreww/obspy
    def test_format_iris_webservice(self):
        """
        Tests the format IRIS webservice function.

        See issue #1096.
        """
        # These are parsed slightly differently (1 microsecond difference), but
        # the IRIS webservice string should be identical as it is only
        # accurate to three digits.
        d1 = UTCDateTime(2011, 1, 25, 15, 32, 12.26)
        d2 = UTCDateTime("2011-01-25T15:32:12.26")

        self.assertEqual(d1.format_iris_web_service(), d2.format_iris_web_service())
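For orientation, a minimal sketch of what the assertion exercises; the output shown in the comment is my expectation of the millisecond-precision IRIS string, not a value taken from the test:

from obspy import UTCDateTime

d1 = UTCDateTime(2011, 1, 25, 15, 32, 12.26)
d2 = UTCDateTime("2011-01-25T15:32:12.26")
# Despite a possible 1 microsecond parsing difference, both should render to
# the same millisecond-precision string (roughly '2011-01-25T15:32:12.260')
print(d1.format_iris_web_service())
print(d2.format_iris_web_service())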
Code example #5
File: cm.py Project: YongYuPku/obspy
def _colormap_plot_beamforming_time(cmaps):
    """
    Plot for illustrating colormaps: beamforming.

    :param cmaps: list of :class:`~matplotlib.colors.Colormap`
    :rtype: None
    """
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates

    from obspy import UTCDateTime
    from obspy.signal.array_analysis import array_processing

    # Execute array_processing
    stime = UTCDateTime("20080217110515")
    etime = UTCDateTime("20080217110545")
    kwargs = dict(
        # slowness grid: X min, X max, Y min, Y max, Slow Step
        sll_x=-3.0, slm_x=3.0, sll_y=-3.0, slm_y=3.0, sl_s=0.03,
        # sliding window properties
        win_len=1.0, win_frac=0.05,
        # frequency properties
        frqlow=1.0, frqhigh=8.0, prewhiten=0,
        # restrict output
        semb_thres=-1e9, vel_thres=-1e9, timestamp='mlabday',
        stime=stime, etime=etime
    )
    st = _get_beamforming_example_stream()
    out = array_processing(st, **kwargs)
    # Plot
    labels = ['rel.power', 'abs.power', 'baz', 'slow']
    xlocator = mdates.AutoDateLocator()
    for cmap in cmaps:
        fig = plt.figure()
        for i, lab in enumerate(labels):
            ax = fig.add_subplot(4, 1, i + 1)
            ax.scatter(out[:, 0], out[:, i + 1], c=out[:, 1], alpha=0.6,
                       edgecolors='none', cmap=cmap)
            ax.set_ylabel(lab)
            ax.set_xlim(out[0, 0], out[-1, 0])
            ax.set_ylim(out[:, i + 1].min(), out[:, i + 1].max())
            ax.xaxis.set_major_locator(xlocator)
            ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(xlocator))
        fig.suptitle('AGFA skyscraper blasting in Munich %s' % (
            stime.strftime('%Y-%m-%d'), ))
        fig.autofmt_xdate()
        fig.subplots_adjust(left=0.15, top=0.95, right=0.95, bottom=0.2,
                            hspace=0)
    plt.show()
Code example #6
File: utils.py Project: CSchwarz1234/obspy
def DateTime2String(dt, compact=False):
    """
    Generates a valid SEED time string from a UTCDateTime object.
    """
    if isinstance(dt, UTCDateTime):
        return dt.formatSEED(compact)
    elif isinstance(dt, basestring):
        dt = dt.strip()
    if not dt:
        return ""
    try:
        dt = UTCDateTime(dt)
        return dt.formatSEED(compact)
    except:
        raise Exception("Invalid datetime %s: %s" % (type(dt), str(dt)))
Code example #7
File: test_utcdatetime.py Project: andreww/obspy
 def test_to_python_date_time_objects(self):
     """
     Tests getDate, getTime, getTimestamp and getDateTime methods.
     """
     dt = UTCDateTime(1970, 1, 1, 12, 23, 34, 456789)
     # as function
     self.assertEqual(dt._get_date(), datetime.date(1970, 1, 1))
     self.assertEqual(dt._get_time(), datetime.time(12, 23, 34, 456789))
     self.assertEqual(dt._get_datetime(), datetime.datetime(1970, 1, 1, 12, 23, 34, 456789))
     self.assertAlmostEqual(dt._get_timestamp(), 44614.456789)
     # as property
     self.assertEqual(dt.date, datetime.date(1970, 1, 1))
     self.assertEqual(dt.time, datetime.time(12, 23, 34, 456789))
     self.assertEqual(dt.datetime, datetime.datetime(1970, 1, 1, 12, 23, 34, 456789))
     self.assertAlmostEqual(dt.timestamp, 44614.456789)
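The conversion also works in the other direction, since UTCDateTime accepts a datetime.datetime directly; a small sketch (not part of the test suite above):

import datetime
from obspy import UTCDateTime

py_dt = datetime.datetime(1970, 1, 1, 12, 23, 34, 456789)
# The naive datetime is interpreted as UTC, so the round trip is exact
assert UTCDateTime(py_dt) == UTCDateTime(1970, 1, 1, 12, 23, 34, 456789)
assert UTCDateTime(py_dt).datetime == py_dt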
Code example #8
File: utils.py Project: junlysky/obspy
def datetime_2_string(dt, compact=False):
    """
    Generates a valid SEED time string from a UTCDateTime object.
    """
    if isinstance(dt, UTCDateTime):
        return dt.format_seed(compact)
    elif isinstance(dt, (str, native_str)):
        dt = dt.strip()
    if not dt:
        return ""
    try:
        dt = UTCDateTime(dt)
        return dt.format_seed(compact)
    except Exception:
        raise Exception("Invalid datetime %s: %s" % (type(dt), str(dt)))
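For reference, format_seed() can also be called directly on a UTCDateTime; a minimal sketch, where the comment reflects my reading of the SEED time convention (year, day-of-year, time) rather than this project's tests:

from obspy import UTCDateTime

# Renders the SEED-style "year,day-of-year,time" string that the helper above
# returns; compact=True trims trailing zero fields
print(UTCDateTime(2008, 10, 1, 12, 30, 35).format_seed())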
Code example #9
File: evt_base.py Project: vcoeur61/evt
 def _time(self, blocktime, param, val, offset):
     """
     Convert an EVT time value to an ObsPy UTCDateTime.
     :param blocktime: time in seconds after 1980/1/1
     :param param: index of the millisecond value within val
     :param val: list of values
     :param offset: index offset subtracted from param
     """
     frame_time = blocktime
     if param > 0:
         frame_milli = val[param-offset]
     else:
         frame_milli = 0
     frame_time += 315532800  # diff between 1970/1/1 and 1980/1/1
     time = UTCDateTime(frame_time) + frame_milli/1000.0
     time.precision = 3
     return time
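The hard-coded 315532800 above is simply the number of seconds between the POSIX epoch (1970/1/1) and 1980/1/1, which is easy to sanity-check:

from obspy import UTCDateTime

# 3652 days (ten years including the 1972 and 1976 leap days) * 86400 s
assert UTCDateTime(1980, 1, 1) - UTCDateTime(1970, 1, 1) == 315532800
assert UTCDateTime(0) + 315532800 == UTCDateTime(1980, 1, 1)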
Code example #10
File: utils.py Project: wjlei1990/pytomo3d
def create_simple_inventory(
    network,
    station,
    latitude=None,
    longitude=None,
    elevation=None,
    depth=None,
    start_date=None,
    end_date=None,
    location_code="S3",
    channel_code="MX",
):
    """
    Create a simple inventory with only location information
    for ZNE components, especially useful for synthetic data
    """
    azi_dict = {"MXZ": 0.0, "MXN": 0.0, "MXE": 90.0}
    dip_dict = {"MXZ": 90.0, "MXN": 0.0, "MXE": 0.0}
    channel_list = []

    if start_date is None:
        start_date = UTCDateTime(0)

    # specfem default channel code is MX
    for _comp in ["Z", "E", "N"]:
        _chan_code = "%s%s" % (channel_code, _comp)
        chan = Channel(
            _chan_code,
            location_code,
            latitude=latitude,
            longitude=longitude,
            elevation=elevation,
            depth=depth,
            azimuth=azi_dict[_chan_code],
            dip=dip_dict[_chan_code],
            start_date=start_date,
            end_date=end_date,
        )
        channel_list.append(chan)

    site = Site("N/A")
    sta = Station(
        station,
        latitude=latitude,
        longitude=longitude,
        elevation=elevation,
        channels=channel_list,
        site=site,
        creation_date=start_date,
        total_number_of_channels=3,
        selected_number_of_channels=3,
    )

    nw = Network(network, stations=[sta], total_number_of_stations=1, selected_number_of_stations=1)

    inv = Inventory([nw], source="SPECFEM3D_GLOBE", sender="Princeton", created=UTCDateTime.now())

    return inv
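A possible usage sketch for the helper above. The imports are assumed (they are not shown in the snippet) and the network/station codes and coordinates are purely illustrative:

from obspy import UTCDateTime
from obspy.core.inventory import Inventory, Network, Station, Channel, Site

inv = create_simple_inventory(
    "SY", "STA01",                  # hypothetical synthetic network/station
    latitude=10.0, longitude=20.0,  # illustrative coordinates
    elevation=0.0, depth=0.0,
    start_date=UTCDateTime(2010, 1, 1))
print(inv)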
Code example #11
File: util.py Project: junlysky/obspy
def _parse_long_time(time_bytestring, decode=True):
    if decode:
        time_string = time_bytestring.decode()
    else:
        time_string = time_bytestring
    if not time_string.strip():
        return None
    time_string, milliseconds = time_string[:-3], int(time_string[-3:])
    return (UTCDateTime.strptime(time_string, '%Y%j%H%M%S') +
            1e-3 * milliseconds)
Code example #12
File: util.py Project: Brtle/obspy
def _parse_long_time(time_bytestring, decode=True):
    """
    :returns: POSIX timestamp as integer nanoseconds
    """
    if decode:
        time_string = time_bytestring.decode()
    else:
        time_string = time_bytestring
    if not time_string.strip():
        return None
    time_string, milliseconds = time_string[:-3], int(time_string[-3:])
    t = UTCDateTime.strptime(time_string, '%Y%j%H%M%S')
    nanoseconds = t._ns
    nanoseconds += milliseconds * 1000000
    return nanoseconds
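For orientation, the byte string both variants of _parse_long_time() expect is '%Y%j%H%M%S' followed by three millisecond digits. A sketch with a hypothetical input, showing the two return styles:

from obspy import UTCDateTime

raw = b"2015123093000500"  # hypothetical: year 2015, day-of-year 123, 09:30:00 + 500 ms
time_string, milliseconds = raw.decode()[:-3], int(raw.decode()[-3:])
t = UTCDateTime.strptime(time_string, '%Y%j%H%M%S') + 1e-3 * milliseconds
print(t)      # 2015-05-03T09:30:00.500000Z
print(t._ns)  # the same instant as integer nanoseconds, as the second variant returns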
Code example #13
File: test_utcdatetime.py Project: avuan/obspy
 def test_year_2038_problem(self):
     """
     See issue #805
     """
     dt = UTCDateTime(2004, 1, 10, 13, 37, 4)
     self.assertEqual(dt.__str__(), '2004-01-10T13:37:04.000000Z')
     dt = UTCDateTime(2038, 1, 19, 3, 14, 8)
     self.assertEqual(dt.__str__(), '2038-01-19T03:14:08.000000Z')
     dt = UTCDateTime(2106, 2, 7, 6, 28, 16)
     self.assertEqual(dt.__str__(), '2106-02-07T06:28:16.000000Z')
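As a quick check of the same point, arithmetic across the classic 32-bit rollover instant also behaves normally; a minimal sketch:

from obspy import UTCDateTime

t = UTCDateTime(2038, 1, 19, 3, 14, 7)  # last second representable as signed 32-bit time_t
print(t + 1)                            # 2038-01-19T03:14:08.000000Z
print(UTCDateTime(2106, 2, 7, 6, 28, 16) + 86400)  # well past the 32-bit range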
Code example #14
File: convert.py Project: wjlei1990/pypaw
def add_stationxml_to_asdf(ds, staxml_filelist, event=None,
                           create_simple_inv=False, sta_dict=None,
                           status_bar=False):
    # Add StationXML files.
    if create_simple_inv:
        if event is None:
            start_date = UTCDateTime.now()
        else:
            origin = event.preferred_origin() or event.origins[0]
            event_time = origin.time
            start_date = event_time - 300.0
        nstaxml = len(sta_dict)
        count = 0
        for tag, value in sta_dict.iteritems():
            count += 1
            inv = create_simple_inventory(
                value[0], value[1], latitude=value[2], longitude=value[3],
                elevation=value[4], depth=value[5], start_date=start_date)
            ds.add_stationxml(inv)
            if status_bar > 0:
                drawProgressBar((count)/nstaxml,
                                "Adding StationXML(created) data")
    else:
        nstaxml = len(staxml_filelist)
        if staxml_filelist is not None and nstaxml > 0:
            for _i, filename in enumerate(staxml_filelist):
                if not os.path.exists(filename):
                    raise ValueError("Staxml not exist %i of %i: %s"
                                     % (_i, nstaxml, filename))
                try:
                    ds.add_stationxml(filename)
                except Exception as err:
                    print("Error convert(%s) due to:%s" % (filename, err))
                if status_bar > 0:
                    drawProgressBar((_i+1)/nstaxml, "Adding StationXML data")
        else:
            print("No stationxml added")
Code example #15
File: test_utcdatetime.py Project: jshridha/obspy
 def test_utcnow(self):
     """
     Test utcnow class method of UTCDateTime class.
     """
     dt = UTCDateTime()
     self.assertGreaterEqual(UTCDateTime.utcnow(), dt)
Code example #16
def findFirstArrivals(starttime, st, plot_checkcalcs=False):
    """
    Transforms each trace in obspy stream object to determine the "first arrival",
    or onset of the trigger that might be a landslide. Returns time of this onset
    for each trace, as well as the signal index corresponding to this time and 
    the transformed traces for plotting. Adapts methodology from Baillard et al 
    (2014) paper on using kurtosis to pick P- and S-wave onset-times.
    INPUTS
    starttime (UTCDateTime) - starting time of stream object
    st - obspy stream object with seismic information
    plot_checkcalcs (boolean) - optional, set to True to visualize calculations
        at each step of signal transformation process
    OUTPUTS
    F4_traces (2D numpy array) - traces from stream object after fourth and final transformation 
        step, useful for plotting first arrival times
    arrival_times (list of UTCDateTimes) - first arrival times for each trace;
        if multiple candidate arrivals are found, the algorithm selects the one
        closest in time to the previous trace's arrival time (for the first
        trace, the first detected arrival is used)
    arrival_i (list of numpy int64s) - indices of first arrival times within
        traces
    """

    arrival_times = []
    arrival_i = []

    max_length = 0
    for trace in st:
        if len(trace) > max_length:
            max_length = len(trace)

    F4_traces = np.zeros((len(st), max_length))

    # Iterate through every channel in stream
    for t in range(0, len(st)):
        # Take kurtosis of trace
        F1 = signal.kurtosis(st[t], win=5000.0)

        # Change first part of signal to avoid initial noise
        F1[:1000] = F1[1000]

        # Remove negative slopes
        F2 = np.zeros(len(F1))
        F2[0] = F1[0]
        for i in range(0, len(F1)):
            dF = F1[i] - F1[i - 1]
            if dF >= 0:
                d = 1
            else:
                d = 0
            F2[i] = F2[i - 1] + (d * dF)

        # Remove linear trend
        F3 = np.zeros(len(F2))
        b = F2[0]
        a = (F2[-1] - b) / (len(F2) - 1)
        for i in range(0, len(F2)):
            F3[i] = F2[i] - (a * i + b)

        # Smooth F3 curve
        F3smooth = spsignal.savgol_filter(F3, 501, 1)
        # Define lists of maxima and minima
        M = []  # maxima
        M_i = []  # maxima indices
        m = []  # minima
        m_i = []  # minima indices
        for i in range(1, len(F3smooth) - 1):
            if F3smooth[i] > F3smooth[i - 1] and F3smooth[i] > F3smooth[i + 1]:
                M.append(F3smooth[i])
                M_i.append(i)
            if F3smooth[i] < F3smooth[i - 1] and F3smooth[i] < F3smooth[i + 1]:
                m.append(F3smooth[i])
                m_i.append(i)
        M.append(0)
        M_i.append(len(F3smooth))
        if len(m_i) == 0:
            m_i.append(np.argmin(F3smooth))

        # Scale amplitudes based on local maxima
        F4 = np.zeros(len(F3smooth))
        Mlist = []
        for i in range(0, len(F3smooth)):
            # Find next maximum
            for j in reversed(range(0, len(M))):
                if i <= M_i[j]:
                    thisM = M[j]
            if i < m_i[0]:
                thisM = F3[i]
            Mlist.append(thisM)

            # Calculate difference between F3 value and next maximum
            T = F3smooth[i] - thisM

            # Calculate new signal
            if T < 0:
                F4[i] = T
            else:
                F4[i] = 0

            if len(M) > 1:
                for j in range(1, len(m)):
                    if i < m_i[j] and i > M_i[j - 1]:
                        F4[i] = 0

        # Plot each step
        if plot_checkcalcs:
            plt.figure()

            plt.subplot(511)
            plt.title('Station = ' + st[t].stats.station)
            plt.plot(st[t].data)

            plt.subplot(512)
            plt.plot(F1)

            plt.subplot(513)
            plt.plot(F2)

            plt.subplot(514)
            plt.plot(F3)
            plt.plot(F3smooth, 'r')

            plt.subplot(515)
            plt.plot(F4)

            plt.show()

        sample_rate = st[t].stats.sampling_rate

        # Find first arrival time

        # First find how many spikes were detected
        spike_values = np.where(F4 < min(F4) * .2)[0]
        mins = [spike_values[0]]
        not_mins = [spike_values[1]]
        for i in range(2, len(spike_values)):
            # Find next nonconsecutive index and store in list
            if spike_values[i] != (mins[-1] + 1) and spike_values[i] != (
                    not_mins[-1] + 1):
                mins.append(spike_values[i])
            else:
                not_mins.append(spike_values[i])

        # Set minimum that is closest in time to previous station's arrival time
        # but AFTER it as arrival time
        if len(mins) > 1 and t > 0:
            closest_min = mins[0]
            for i in range(1, len(mins)):
                if abs(arrival_times[t-1] - UTCDateTime(mins[i]/sample_rate + \
                   starttime.timestamp)) < abs(arrival_times[t-1] - \
                       UTCDateTime(closest_min/sample_rate + starttime.timestamp)):
                    closest_min = mins[i]
            arrival_index = closest_min
        else:
            arrival_index = mins[0]

        F4_traces[t] = np.interp(range(0, max_length), range(0, len(F4)), F4)
        arrival_i.append(arrival_index)  # Index of first arrival time
        arrival_timedelta = arrival_index / sample_rate
        arrival_times.append(
            UTCDateTime(arrival_timedelta + starttime.timestamp))

    return (F4_traces, arrival_times, arrival_i)
Code example #17
File: plot_response.py Project: ogalanis/HTdataless
import matplotlib.pyplot as plt
import dateutil.parser
import sys
from obspy import read_inventory, UTCDateTime

if (len(sys.argv) < 2 or len(sys.argv) > 3):
    sys.exit("Usage: python {:s} station_code [datetime]".format(sys.argv[0]))
sta = sys.argv[1]

if (len(sys.argv) > 2):
    try:
        t0 = dateutil.parser.parse(sys.argv[2])
    except:
        print "Datetime could not be parsed. Using current date and time"
        t0 = UTCDateTime.now()
else:
    t0 = UTCDateTime.now()

t0str = t0.isoformat()

try:
    inv = read_inventory("../HTdataless_AUTh/HT.{:s}.dataless".format(sta),
                         format="SEED")
except:
    sys.exit("No response information found for station {:s}".format(sta))

inv = inv.select(channel="*Z", time=t0)

if not inv:
    sys.exit("No response information found for station {:s} on {:s}".format(
        sta, t0str))
Code example #18
File: getIIdata.py Project: aringler-usgs/getIIdata
	def __init__(self, year, startday, network, **kwargs):
		# initialize year/start/net
		# if statement to check for main args set QUERY=True	
		# else sys.exit(1)
		if (year != "") and (startday != "") and (network != ""):	
			self.year = year
			self.startday = startday
			self.network = network
			QUERY = True
		else:
			QUERY = False

		# loop through **kwargs and initialize optargs
		self.endday = ""	# init endday string
		self.station = "" 	# init station string
		self.location = ""	# init location string
		self.channel = ""	# init channel string
		self.debug = False	# init debug
		self.archive = False	# init archive
		endday = self.endday
		for key,val in kwargs.iteritems(): 
			if key == "endday": self.endday = val
			elif key == "station": self.station = val
			elif key == "location": self.location = val
			elif key == "channel": self.channel = val
			elif key == "debug": self.debug = self.toBool(val)
			elif key == "archive": self.archive = self.toBool(val) 

		# print arguments if 'debug' mode
		if self.debug:
			print "Year: " + self.year
			print "Start Day: " + self.startday
			print "End Day: " + self.endday
			print "Network: " + self.network
			print "Station: " + self.station
			print "Location: " + self.location
			print "Channel: " + self.channel

		# handle wildcards
		if self.location == "?":
			self.location = "*"
		if self.channel == "?":
			self.channel = "*"
		if self.station == "?":
			self.station = "*"

		# set start/end to UTCDateTime object
		#--------------------------------------------------------------------
		self.startTime = UTCDateTime(year + startday +"T00:00:00.000")
		# If no end day in parser default to 1 day
		if self.endday == "?":
			self.endday = str(int(self.startday) + 1).zfill(3)
			self.endTime = self.startTime + 24*60*60
		else:
			self.endTime = UTCDateTime(year + self.endday +"T00:00:00.000")
		print "Here is our start time: " + self.startTime.formatIRISWebService()
		print "Here is our end time:   " + self.endTime.formatIRISWebService()
		self.days = int(self.endday)- int(self.startday)
		# there are 24, 1 hour increments in a day
		self.hours = (int(self.endday)- int(self.startday)) * 24 
		# Will only run if main args are given
		# check QUERY flag if True continue
		if QUERY:
			self.queryData()
		else:
			print '\nNo main args given.'
			print 'Exiting\n'
			sys.exit(1)
Code example #19
File: core.py Project: QuLogic/obspy
def _read_single_hypocenter(lines, coordinate_converter, original_picks):
    """
    Given a list of lines (starting with a 'NLLOC' line and ending with a
    'END_NLLOC' line), parse them into an Event.
    """
    try:
        # some paranoid checks..
        assert lines[0].startswith("NLLOC ")
        assert lines[-1].startswith("END_NLLOC")
        for line in lines[1:-1]:
            assert not line.startswith("NLLOC ")
            assert not line.startswith("END_NLLOC")
    except Exception:
        msg = ("This should not have happened, please report this as a bug at "
               "https://github.com/obspy/obspy/issues.")
        raise Exception(msg)

    indices_phases = [None, None]
    for i, line in enumerate(lines):
        if line.startswith("PHASE "):
            indices_phases[0] = i
        elif line.startswith("END_PHASE"):
            indices_phases[1] = i

    # extract PHASES lines (if any)
    if any(indices_phases):
        if not all(indices_phases):
            msg = ("NLLOC HYP file seems corrupt, 'PHASE' block is corrupt.")
            raise RuntimeError(msg)
        i1, i2 = indices_phases
        lines, phases_lines = lines[:i1] + lines[i2 + 1:], lines[i1 + 1:i2]
    else:
        phases_lines = []

    lines = dict([line.split(None, 1) for line in lines[:-1]])
    line = lines["SIGNATURE"]

    line = line.rstrip().split('"')[1]
    signature, version, date, time = line.rsplit(" ", 3)
    # new NLLoc > 6.0 seems to add prefix 'run:' before date
    if date.startswith('run:'):
        date = date[4:]
    signature = signature.strip()
    creation_time = UTCDateTime.strptime(date + time, str("%d%b%Y%Hh%Mm%S"))

    if coordinate_converter:
        # maximum likelihood origin location in km info line
        line = lines["HYPOCENTER"]
        x, y, z = coordinate_converter(*map(float, line.split()[1:7:2]))
    else:
        # maximum likelihood origin location lon lat info line
        line = lines["GEOGRAPHIC"]
        y, x, z = map(float, line.split()[8:13:2])

    # maximum likelihood origin time info line
    line = lines["GEOGRAPHIC"]

    year, mon, day, hour, min = map(int, line.split()[1:6])
    seconds = float(line.split()[6])
    time = UTCDateTime(year, mon, day, hour, min, seconds, strict=False)

    # distribution statistics line
    line = lines["STATISTICS"]
    covariance_xx = float(line.split()[7])
    covariance_yy = float(line.split()[13])
    covariance_zz = float(line.split()[17])
    stats_info_string = str(
        "Note: Depth/Latitude/Longitude errors are calculated from covariance "
        "matrix as 1D marginal (Lon/Lat errors as great circle degrees) "
        "while OriginUncertainty min/max horizontal errors are calculated "
        "from 2D error ellipsoid and are therefore seemingly higher compared "
        "to 1D errors. Error estimates can be reconstructed from the "
        "following original NonLinLoc error statistics line:\nSTATISTICS " +
        lines["STATISTICS"])

    # goto location quality info line
    line = lines["QML_OriginQuality"].split()

    (assoc_phase_count, used_phase_count, assoc_station_count,
     used_station_count, depth_phase_count) = map(int, line[1:11:2])
    stderr, az_gap, sec_az_gap = map(float, line[11:17:2])
    gt_level = line[17]
    min_dist, max_dist, med_dist = map(float, line[19:25:2])

    # goto location quality info line
    line = lines["QML_OriginUncertainty"]

    if "COMMENT" in lines:
        comment = lines["COMMENT"].strip()
        comment = comment.strip('\'"')
        comment = comment.strip()

    hor_unc, min_hor_unc, max_hor_unc, hor_unc_azim = \
        map(float, line.split()[1:9:2])

    # assign origin info
    event = Event()
    o = Origin()
    event.origins = [o]
    event.preferred_origin_id = o.resource_id
    o.origin_uncertainty = OriginUncertainty()
    o.quality = OriginQuality()
    ou = o.origin_uncertainty
    oq = o.quality
    o.comments.append(Comment(text=stats_info_string, force_resource_id=False))
    event.comments.append(Comment(text=comment, force_resource_id=False))

    # SIGNATURE field's first item is LOCSIG, which is supposed to be
    # 'Identification of an individual, institution or other entity'
    # according to
    # http://alomax.free.fr/nlloc/soft6.00/control.html#_NLLoc_locsig_
    # so use it as author in creation info
    event.creation_info = CreationInfo(creation_time=creation_time,
                                       version=version,
                                       author=signature)
    o.creation_info = CreationInfo(creation_time=creation_time,
                                   version=version,
                                   author=signature)

    # negative values can appear on diagonal of covariance matrix due to a
    # precision problem in NLLoc implementation when location coordinates are
    # large compared to the covariances.
    o.longitude = x
    try:
        o.longitude_errors.uncertainty = kilometer2degrees(sqrt(covariance_xx))
    except ValueError:
        if covariance_xx < 0:
            msg = ("Negative value in XX value of covariance matrix, not "
                   "setting longitude error (epicentral uncertainties will "
                   "still be set in origin uncertainty).")
            warnings.warn(msg)
        else:
            raise
    o.latitude = y
    try:
        o.latitude_errors.uncertainty = kilometer2degrees(sqrt(covariance_yy))
    except ValueError:
        if covariance_yy < 0:
            msg = ("Negative value in YY value of covariance matrix, not "
                   "setting latitude error (epicentral uncertainties will "
                   "still be set in origin uncertainty).")
            warnings.warn(msg)
        else:
            raise
    o.depth = z * 1e3  # meters!
    o.depth_errors.uncertainty = sqrt(covariance_zz) * 1e3  # meters!
    o.depth_errors.confidence_level = 68
    o.depth_type = str("from location")
    o.time = time

    ou.horizontal_uncertainty = hor_unc
    ou.min_horizontal_uncertainty = min_hor_unc
    ou.max_horizontal_uncertainty = max_hor_unc
    # values of -1 seem to be used for unset values, set to None
    for field in ("horizontal_uncertainty", "min_horizontal_uncertainty",
                  "max_horizontal_uncertainty"):
        if ou.get(field, -1) == -1:
            ou[field] = None
        else:
            ou[field] *= 1e3  # meters!
    ou.azimuth_max_horizontal_uncertainty = hor_unc_azim
    ou.preferred_description = str("uncertainty ellipse")
    ou.confidence_level = 68  # NonLinLoc in general uses 1-sigma (68%) level

    oq.standard_error = stderr
    oq.azimuthal_gap = az_gap
    oq.secondary_azimuthal_gap = sec_az_gap
    oq.used_phase_count = used_phase_count
    oq.used_station_count = used_station_count
    oq.associated_phase_count = assoc_phase_count
    oq.associated_station_count = assoc_station_count
    oq.depth_phase_count = depth_phase_count
    oq.ground_truth_level = gt_level
    oq.minimum_distance = kilometer2degrees(min_dist)
    oq.maximum_distance = kilometer2degrees(max_dist)
    oq.median_distance = kilometer2degrees(med_dist)

    # go through all phase info lines
    for line in phases_lines:
        line = line.split()
        arrival = Arrival()
        o.arrivals.append(arrival)
        station = str(line[0])
        phase = str(line[4])
        arrival.phase = phase
        arrival.distance = kilometer2degrees(float(line[21]))
        arrival.azimuth = float(line[23])
        arrival.takeoff_angle = float(line[24])
        arrival.time_residual = float(line[16])
        arrival.time_weight = float(line[17])
        pick = Pick()
        # network codes are not used by NonLinLoc, so they can not be known
        # when reading the .hyp file.. to conform with QuakeML standard set an
        # empty network code
        wid = WaveformStreamID(network_code="", station_code=station)
        # have to split this into ints for overflow to work correctly
        date, hourmin, sec = map(str, line[6:9])
        ymd = [int(date[:4]), int(date[4:6]), int(date[6:8])]
        hm = [int(hourmin[:2]), int(hourmin[2:4])]
        t = UTCDateTime(*(ymd + hm), strict=False) + float(sec)
        pick.waveform_id = wid
        pick.time = t
        pick.time_errors.uncertainty = float(line[10])
        pick.phase_hint = phase
        pick.onset = ONSETS.get(line[3].lower(), None)
        pick.polarity = POLARITIES.get(line[5].lower(), None)
        # try to determine original pick for each arrival
        for pick_ in original_picks:
            wid = pick_.waveform_id
            if station == wid.station_code and phase == pick_.phase_hint:
                pick = pick_
                break
        else:
            # warn if original picks were specified and we could not associate
            # the arrival correctly
            if original_picks:
                msg = ("Could not determine corresponding original pick for "
                       "arrival. "
                       "Falling back to pick information in NonLinLoc "
                       "hypocenter-phase file.")
                warnings.warn(msg)
        event.picks.append(pick)
        arrival.pick_id = pick.resource_id

    event.scope_resource_ids()

    return event
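The distance fields above go through kilometer2degrees(); as a rough orientation (assuming the helper's default Earth radius of 6371 km), one degree of arc corresponds to about 111.19 km:

from obspy.geodetics import kilometer2degrees

print(kilometer2degrees(111.19))  # ~1.0 degree
print(kilometer2degrees(1000.0))  # ~9.0 degrees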
Code example #20
import matplotlib.pylab as plt
%matplotlib inline
from datetime import datetime
import os
import calendar
import urllib.request

# %% codecell
# Load Client
client = Client(webservice)
print(client)

if not os.path.exists(CMT2idagrn_path):
    os.makedirs(CMT2idagrn_path)
# %% codecell
t1 = UTCDateTime(tstart)
t2 = UTCDateTime(tend)

# Load events from GCMT catalogue using IRIS SPUD
url_query = 'http://ds.iris.edu/spudservice/momenttensor/ids?' \
           +'evtstartdate='+t1.strftime('%Y-%m-%dT%H:%M:%S') \
           +'&evtenddate='+t2.strftime('%Y-%m-%dT%H:%M:%S') \
           +'&evtminmag='+str(minmagnitude)
evids = urllib.request.urlopen(url_query)
events_str = '&'.join([line.decode("utf-8").replace("\n", "") for line in evids])+'&'
url_ndk = 'http://ds.iris.edu/spudservice/momenttensor/bundleids/ndk?'+events_str
cat_evts = obspy.read_events(url_ndk)
if iscentroid: # Use centroid parameters
    ortype = 'centroid'
else: # Use hypocenter parameters
    ortype = 'hypocenter'
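The SPUD query above relies on UTCDateTime supporting datetime-style strftime(); a tiny sketch with an illustrative time:

from obspy import UTCDateTime

t1 = UTCDateTime("2011-03-11T05:46:24")
print(t1.strftime('%Y-%m-%dT%H:%M:%S'))  # '2011-03-11T05:46:24'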
Code example #21
File: fnetmt.py Project: Brtle/obspy
def _internal_read_single_fnetmt_entry(line, **kwargs):
    """
    Reads a single F-net moment tensor solution to a
    :class:`~obspy.core.event.Event` object.

    :param line: String containing moment tensor information.
    :type line: str.
    """

    a = line.split()
    try:
        ot = UTCDateTime.strptime(a[0], '%Y/%m/%d,%H:%M:%S.%f')
    except ValueError:
        ot = UTCDateTime.strptime(a[0], '%Y/%m/%d,%H:%M:%S')
    lat, lon, depjma, magjma = map(float, a[1:5])
    depjma *= 1000
    region = a[5]
    strike = tuple(map(int, a[6].split(';')))
    dip = tuple(map(int, a[7].split(';')))
    rake = tuple(map(int, a[8].split(';')))
    mo = float(a[9])
    depmt = float(a[10]) * 1000
    magmt = float(a[11])
    var_red = float(a[12])
    mxx, mxy, mxz, myy, myz, mzz, unit = map(float, a[13:20])

    event_name = util.gen_sc3_id(ot)
    e = Event(event_type="earthquake")
    e.resource_id = _get_resource_id(event_name, 'event')

    # Standard JMA solution
    o_jma = Origin(time=ot, latitude=lat, longitude=lon,
                   depth=depjma, depth_type="from location",
                   region=region)
    o_jma.resource_id = _get_resource_id(event_name,
                                         'origin', 'JMA')
    m_jma = Magnitude(mag=magjma, magnitude_type='ML',
                      origin_id=o_jma.resource_id)
    m_jma.resource_id = _get_resource_id(event_name,
                                         'magnitude', 'JMA')
    # MT solution
    o_mt = Origin(time=ot, latitude=lat, longitude=lon,
                  depth=depmt, region=region,
                  depth_type="from moment tensor inversion")
    o_mt.resource_id = _get_resource_id(event_name,
                                        'origin', 'MT')
    m_mt = Magnitude(mag=magmt, magnitude_type='Mw',
                     origin_id=o_mt.resource_id)
    m_mt.resource_id = _get_resource_id(event_name,
                                        'magnitude', 'MT')
    foc_mec = FocalMechanism(triggering_origin_id=o_jma.resource_id)
    foc_mec.resource_id = _get_resource_id(event_name,
                                           "focal_mechanism")
    nod1 = NodalPlane(strike=strike[0], dip=dip[0], rake=rake[0])
    nod2 = NodalPlane(strike=strike[1], dip=dip[1], rake=rake[1])
    nod = NodalPlanes(nodal_plane_1=nod1, nodal_plane_2=nod2)
    foc_mec.nodal_planes = nod

    tensor = Tensor(m_rr=mxx, m_tt=myy, m_pp=mzz, m_rt=mxy, m_rp=mxz, m_tp=myz)
    cm = Comment(text="Basis system: North,East,Down "
                      "(Jost and Herrmann 1989)")
    cm.resource_id = _get_resource_id(event_name, 'comment', 'mt')
    mt = MomentTensor(derived_origin_id=o_mt.resource_id,
                      moment_magnitude_id=m_mt.resource_id,
                      scalar_moment=mo, comments=[cm],
                      tensor=tensor, variance_reduction=var_red)
    mt.resource_id = _get_resource_id(event_name,
                                      'moment_tensor')
    foc_mec.moment_tensor = mt
    e.origins = [o_jma, o_mt]
    e.magnitudes = [m_jma, m_mt]
    e.focal_mechanisms = [foc_mec]
    e.preferred_magnitude_id = m_mt.resource_id.id
    e.preferred_origin_id = o_mt.resource_id.id
    e.preferred_focal_mechanism_id = foc_mec.resource_id.id

    e.scope_resource_ids()

    return e
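The try/except at the top handles F-net origin times with and without fractional seconds; a sketch of both cases with hypothetical values:

from obspy import UTCDateTime

# with fractional seconds
print(UTCDateTime.strptime('2011/03/11,05:46:18.1', '%Y/%m/%d,%H:%M:%S.%f'))
# without fractional seconds (the ValueError fallback above)
print(UTCDateTime.strptime('2011/03/11,05:46:18', '%Y/%m/%d,%H:%M:%S'))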
Code example #22
def slant_stack(eq_num, plot_scale_fac = 0.05, slowR_lo = -0.1, slowR_hi = 0.1, stack_option = 1,
            slow_delta = 0.0005, start_buff = -50, end_buff = 50,
            ref_lat = 36.3, ref_lon = 138.5, ref_loc = 0, envelope = 1, plot_dyn_range = 1000,
            log_plot = 1, norm = 1, global_norm_plot = 1, color_plot = 1, fig_index = 401, ARRAY = 0):

#%% Import functions
    import obspy
    import obspy.signal
    from obspy import UTCDateTime
    from obspy import Stream, Trace
    from obspy import read
    from obspy.geodetics import gps2dist_azimuth
    import numpy as np
    import os
    from obspy.taup import TauPyModel
    import obspy.signal as sign
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm
    model = TauPyModel(model='iasp91')
    from scipy.signal import hilbert
    import math
    import time
    from termcolor import colored

    env_stack = 0  # flag to stack envelopes instead of oscillating seismograms

#    import sys # don't show any warnings
#    import warnings

    print(colored('Running pro5a_stack', 'cyan'))

#%% Get saved event info, also used to name files
    start_time_wc = time.time()

    fname = '/Users/vidale/Documents/Research/IC/EvLocs/event' + str(eq_num) + '.txt'
    file = open(fname, 'r')
    lines=file.readlines()

    split_line = lines[0].split()
#            ids.append(split_line[0])  ignore label for now
    t           = UTCDateTime(split_line[1])
    date_label  = split_line[1][0:10]
    ev_lat      = float(      split_line[2])
    ev_lon      = float(      split_line[3])
    ev_depth    = float(      split_line[4])

    #if not sys.warnoptions:
    #    warnings.simplefilter("ignore")

#%% Get station location file
    if ARRAY == 0: # Hinet set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_hinet.txt'
        if ref_loc == 0:
            ref_lat = 36.3
            ref_lon = 138.5
    elif ARRAY == 1: # LASA set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_LASA.txt'
        if ref_loc == 0:
            ref_lat = 46.69
            ref_lon = -106.22
    elif ARRAY == 2: # China set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_ch.txt'
        if ref_loc == 0:
            ref_lat = 38      # °N
            ref_lon = 104.5   # °E
    else:         # NORSAR set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_NORSAR.txt'
        if ref_loc == 0:
            ref_lat = 61
            ref_lon = 11
    with open(sta_file, 'r') as file:
        lines = file.readlines()
    print('    ' + str(len(lines)) + ' stations of metadata read from ' + sta_file)
    # Load station coords into arrays
    station_index = range(len(lines))
    st_names = []
    st_lats  = []
    st_lons  = []
    for ii in station_index:
        line = lines[ii]
        split_line = line.split()
        st_names.append(split_line[0])
        st_lats.append( split_line[1])
        st_lons.append( split_line[2])
    if ARRAY == 0:  # shorten and make upper case Hi-net station names to match station list
        for ii in station_index:
            this_name = st_names[ii]
            this_name_truc = this_name[0:5]
            st_names[ii]  = this_name_truc.upper()

#%% Name file, read data
    # date_label = '2018-04-02' # date for filename
    fname = 'HD' + date_label + 'sel.mseed'
    goto = '/Users/vidale/Documents/Research/IC/Pro_Files'
    os.chdir(goto)

    # fname = '/Users/vidale/Documents/PyCode/Pro_Files/HD' + date_label + 'sel.mseed'

    st = Stream()
    print('        reading ' + fname)
    print('        Stack option is ' + str(stack_option))
    st = read(fname)
    print('    ' + str(len(st)) + ' traces read in')
    nt = len(st[0].data)
    dt = st[0].stats.delta
    print(f'        First trace has {nt} time pts, time sampling of {dt:.2f} and thus duration of {(nt-1)*dt:.0f} and max amp of {max(abs(st[0].data)):.1f}')
    print(f'st[0].stats.starttime-t {(st[0].stats.starttime-t):.2f} start_buff {start_buff:.2f}')

#%% Build Stack arrays
    stack = Stream()
    tr = Trace()
    tr.stats.delta = dt
    tr.stats.network = 'stack'
    tr.stats.channel = 'BHZ'
    slow_n = int(1 + (slowR_hi - slowR_lo)/slow_delta)  # number of slownesses
    stack_nt = int(1 + ((end_buff - start_buff)/dt))  # number of time points
    # In English, stack_slows = range(slow_n) * slow_delta - slowR_lo
    a1 = range(slow_n)
    stack_slows = [(x * slow_delta + slowR_lo) for x in a1]
    print('        ' + str(slow_n) + ' slownesses.')
    tr.stats.starttime = t + start_buff
    # print(f'tr.stats.starttime-t {(tr.stats.starttime-t):.2f} start_buff {start_buff:.2f}')
    tr.data = np.zeros(stack_nt)
    done = 0
    for stack_one in stack_slows:
        tr1 = tr.copy()
        tr1.stats.station = str(int(done))
        stack.extend([tr1])
        done += 1
    #    stack.append([tr])
    #    stack += tr

    #  Only need to compute ref location to event distance once
    ref_distance = gps2dist_azimuth(ev_lat,ev_lon,ref_lat,ref_lon)

#%% Select traces by distance, window and adjust start time to align picked times
    done = 0
    if env_stack == 1: #convert oscillating seismograms to envelopes
        for tr in st:
            tr.data = np.abs(hilbert(tr.data))

    for tr in st: # traces one by one
        if tr.stats.station in st_names:  # find station in station list
            ii = st_names.index(tr.stats.station)
            if norm == 1:
                tr.normalize()
            stalat = float(st_lats[ii])
            stalon = float(st_lons[ii]) # look up lat & lon again to find distance
            distance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon) # Get traveltimes again, hard to store
            tr.stats.distance=distance[0] # distance in m
            del_dist = (ref_distance[0] - distance[0])/(1000) # in km
            rel_start_buff = tr.stats.starttime - (t + start_buff)
            print(f'{tr.stats.station} del_dist {del_dist:.2f} ref_dist {ref_distance[0]/1000.:.2f} distance {distance[0]/1000.:.2f} rel_start_buff {rel_start_buff:.2f} tr.stats.starttime-t {(tr.stats.starttime-t):.2f} start_buff {start_buff:.2f}')

            for slow_i in range(slow_n):  # for this station, loop over slownesses
                time_lag = -del_dist * stack_slows[slow_i]  # time shift due to slowness, flipped to match 2D
                time_correction = (rel_start_buff + time_lag)/dt
                # print(f'{slow_i} time_lag {time_lag:.1f} time correction {time_correction:.1f}')

                if stack_option == 0:
                    for it in range(stack_nt):  # check points one at a time
                        it_in = int(it + time_correction)
                        if it_in >= 0 and it_in < nt - 1: # does data lie within seismogram?
                            stack[slow_i].data[it] += tr[it_in]

                if stack_option == 1:
                    arr = tr.data
                    nshift = int(time_correction)
                    if time_correction < 0:
                        nshift = nshift-1
                    if nshift <= 0:
                        nbeg1 = -nshift
                        nend1 = stack_nt
                        nbeg2 = 0
                        nend2 = stack_nt + nshift;
                    elif nshift > 0:
                        nbeg1 = 0
                        nend1 = stack_nt - nshift
                        nbeg2 = nshift
                        nend2 = stack_nt
                    if nend1 >= 0 and nbeg1 <= stack_nt:
                        stack[slow_i].data[nbeg1:nend1] += arr[nbeg2:nend2]

            done += 1
            if done % 50 == 0:
                print('        Done stacking ' + str(done) + ' out of ' + str(len(st)) + ' stations.')
        else:
            print(tr.stats.station + ' not found in station list')

#%% Plot traces
    global_max = 0
    for slow_i in range(slow_n): # find global max, and if requested, take envelope
        if len(stack[slow_i].data) == 0:
                print('%d data has zero length ' % (slow_i))
        if envelope == 1 or color_plot == 1:
            stack[slow_i].data = np.abs(hilbert(stack[slow_i].data))
        local_max = max(abs(stack[slow_i].data))
        if local_max > global_max:
            global_max = local_max
    if global_max <= 0:
        print('        global_max ' + str(global_max) + ' slow_n ' + str(slow_n))

    # create time axis (x-axis), use of slow_i here is arbitrary, oops
    ttt = (np.arange(len(stack[slow_i].data)) * stack[slow_i].stats.delta +
         (stack[slow_i].stats.starttime - t)) # in units of seconds

    # Plotting
    if color_plot == 1: # 2D color plot
        stack_array = np.zeros((slow_n,stack_nt))

    #    stack_array = np.random.rand(int(slow_n),int(stack_nt))  # test with random numbers
        min_allowed = global_max/plot_dyn_range
        if log_plot == 1:
            for it in range(stack_nt):  # check points one at a time
                for slow_i in range(slow_n):  # for this station, loop over slownesses
                    num_val = stack[slow_i].data[it]
                    if num_val < min_allowed:
                        num_val = min_allowed
                    stack_array[slow_i, it] = math.log10(num_val) - math.log10(min_allowed)
        else:
            for it in range(stack_nt):  # check points one at a time
                for slow_i in range(slow_n):  # for this station, loop over slownesses
                    stack_array[slow_i, it] = stack[slow_i].data[it]/global_max
        y, x = np.mgrid[slice(stack_slows[0], stack_slows[-1] + slow_delta, slow_delta),
                     slice(ttt[0], ttt[-1] + dt, dt)]  # make underlying x-y grid for plot
    #    y, x = np.mgrid[ stack_slows , time ]  # make underlying x-y grid for plot
        plt.close(fig_index)

        fig, ax = plt.subplots(1, figsize=(9,9))
        fig.subplots_adjust(bottom=0.3)
        c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_rainbow_r)
        # c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_yarg)
        # c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.binary)
        ax.axis([x.min(), x.max(), y.min(), y.max()])
        if log_plot == 1:
            fig.colorbar(c, ax=ax, label='log amplitude')
        else:
            fig.colorbar(c, ax=ax, label='linear amplitude')
        plt.figure(fig_index,figsize=(6,8))
        plt.close(fig_index)
    else: # line plot
        for slow_i in range(slow_n):
            dist_offset = stack_slows[slow_i] # in units of slowness
            if global_norm_plot != 1:
                plt.plot(ttt, stack[slow_i].data*plot_scale_fac / (stack[slow_i].data.max()
            - stack[slow_i].data.min()) + dist_offset, color = 'black')
            else:
                plt.plot(ttt, stack[slow_i].data*plot_scale_fac / (global_max
            - stack[slow_i].data.min()) + dist_offset, color = 'black')
        plt.ylim(slowR_lo,slowR_hi)
        plt.xlim(start_buff,end_buff)
    plt.xlabel('Time (s)')
    plt.ylabel('Slowness (s/km)')
    plt.title('1Dstack   ' + str(eq_num) + '  ' + date_label)
    # os.chdir('/Users/vidale/Documents/PyCode/Plots')
    # plt.savefig(date_label + '_' + str(start_buff) + '_' + str(end_buff) + '_1D.png')
    plt.show()

#%% Save processed files
    print('        Stack has ' + str(len(stack)) + ' slownesses')
#
#    if ARRAY == 0:
#        goto = '/Users/vidale/Documents/PyCode/Hinet'
#    if ARRAY == 1:
#        goto = '/Users/vidale/Documents/PyCode/LASA/Pro_Files'
#    os.chdir(goto)
#    fname = 'HD' + date_label + '_1dstack.mseed'
#    stack.write(fname,format = 'MSEED')

    elapsed_time_wc = time.time() - start_time_wc
    print(f'    This job took   {elapsed_time_wc:.1f}   seconds')
    os.system('say "Done"')
Code example #23
#Read dataset
with pyasdf.ASDFDataSet('/media/chet/rotnga_data/pyasdf/mrp_rotnga.h5') as ds:
    #Read in catalogs
    for a_cat in cat_list:
        cat = read_events(a_cat)
        ev_times = []
        print('Establishing catalog start/end times...')
        for event in cat:
            ev_times.append(event.origins[0].time)
        #Establish start and end dates of this chunk of catalog
        startday = min(ev_times).date
        endday = max(ev_times).date
        #Loop over each possible day in catalog
        for dt in rrule.rrule(rrule.DAILY, dtstart=startday, until=endday):
            #Figure out start and end times for filter/pyasdf
            starttime = UTCDateTime(dt)
            endtime = UTCDateTime(dt + timedelta(days=1))
            #Convert to string for Catalog.filter
            start_str = 'time > ' + str(starttime)
            end_str = 'time < ' + str(endtime)
            print('Starting day loop for ' + start_str)
            day_cat = cat.filter(start_str, end_str)
            if len(day_cat) == 0:
                print('No events for this day...')
                continue
            print('Reading in waveforms from pyasdf for: ' + start_str
                  + ' --> ' + end_str)
            for station in sta_list:
                for ds_station in ds.ifilter(ds.q.station == station,
                                             ds.q.starttime > starttime - 30,
                                             ds.q.endtime < endtime + 30):
Code example #24
        waven = ddirwa + date + net + '.' + sta + '.' + chann + '.' + 'SAC.wa'

        try:
            st = read(wave)
            ste = read(wavee)
            stn = read(waven)

            tr = st[0]
            tr.detrend('demean')
            tr.detrend('linear')
            tr.filter(type="bandpass",
                      freqmin=2.0,
                      freqmax=24.0,
                      zerophase=True)
            df = tr.stats.sampling_rate
            tstart = tr.stats.starttime - UTCDateTime(year, mon, day, 0, 0, 0)
            #print(tstart)
            output = './' + date + net + '.' + sta + '.' + 'P.txt'

            # Characteristic function and trigger onsets, see ObsPy website
            cft = recursive_sta_lta(tr.data, int(0.1 * df), int(2.5 * df))
            on_of = trigger_onset(cft, 6.0, 2.0)

            # Corrected amplitude (local magnitude)
            tre = ste[0]
            tre.detrend('demean')
            tre.detrend('linear')
            tre.filter(type="bandpass",
                       freqmin=0.2,
                       freqmax=10.0,
                       zerophase=True)
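The trigger calls above presumably come from ObsPy's standard trigger module (the imports are not shown in this snippet); a self-contained sketch on the ObsPy demo trace, using the same window lengths and illustrative thresholds:

from obspy import read
from obspy.signal.trigger import recursive_sta_lta, trigger_onset

tr = read()[0]  # ObsPy demo trace, 100 Hz
df = tr.stats.sampling_rate
cft = recursive_sta_lta(tr.data, int(0.1 * df), int(2.5 * df))  # STA/LTA characteristic function
print(trigger_onset(cft, 6.0, 2.0))  # list of [trigger-on, trigger-off] sample indices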
Code example #25
def cutSacByQuakeForCmpAz(quake,staInfos,getFilename,comp=['BHE','BHN','BHZ'],\
    R=[-90,90,-180,180],outDir='/home/jiangyr/cmpaz/cmpAZ/example/',delta=0.01\
    ,B=-200,E=1800,isFromO=False,decMul=10,nameMode='cmpAz',maxDT=100000):
    time0 = quake.time
    tmpDir = outDir
    if not os.path.exists(tmpDir):
        os.mkdir(tmpDir)
    n = int((E - B) / delta)
    pTimeL = quake.getPTimeL(staInfos)
    for ii in range(len(staInfos)):
        staInfo = staInfos[ii]
        if nameMode == 'ML' and len(quake) > 0 and (
                pTimeL[ii] <= 0 or (pTimeL[ii] - quake.time) > maxDT):
            print('skip')
            continue

        if staInfo['la']>=R[0] and \
            staInfo['la']<=R[1] and \
            staInfo['lo']>=R[2] and \
            staInfo['lo']<=R[3]:
            print(staInfo['net'] + staInfo['sta'])
            if nameMode == 'cmpAz':
                staDir = outDir + '/' + staInfo['net'] + '.' + staInfo[
                    'sta'] + '/'
                if not os.path.exists(staDir):
                    os.mkdir(staDir)
                rawDir = staDir + '/' + 'raw/'
            bTime = time0 + B
            eTime = time0 + E
            YmdHMSj0 = tool.getYmdHMSj(UTCDateTime(bTime))
            YmdHMSj1 = tool.getYmdHMSj(UTCDateTime(eTime))
            Y = tool.getYmdHMSj(UTCDateTime(time0))
            if nameMode == 'ML':
                rawDir = outDir + '/' + Y['Y'] + Y['m'] + Y['d'] + Y['H'] + Y[
                    'M'] + '%05.2f/' % (time0 % 60)
            if not os.path.exists(rawDir):
                os.mkdir(rawDir)
            for c in comp:
                if nameMode == 'cmpAz':
                    sacName=Y['Y']+Y['m']+Y['d']+'_'+Y['H']+Y['M']+'.'\
                    +staInfo['sta']+'.'+c
                if nameMode == 'ML':
                    sacName = staInfo['sta'] + '.' + c
                fileNames=getFilename(staInfo['net'],staInfo['sta'],c,YmdHMSj0)\
                +getFilename(staInfo['net'],staInfo['sta'],c,YmdHMSj1)
                fileNames = list(set(fileNames))
                sacM = mergeSacByName(fileNames, delta0=delta)
                print(sacM)
                if sacM is not None:
                    try:
                        sacM.interpolate(int(1 / delta),
                                         starttime=bTime,
                                         npts=n)
                    except:
                        print('no data')
                        continue
                    else:
                        pass
                    if isFromO:
                        time0 = bTime
                    adjust(sacM,loc=[staInfo['la'],staInfo['lo'],staInfo['dep']],kzTime=time0,\
                        decMul=decMul,eloc=quake.loc,net=staInfo['net'],sta=staInfo['sta'],\
                        chn=c)
                    os.system('mv %s  %s' % (tmpSac1, rawDir + sacName))
Code example #26
File: arguments.py Project: wbm06/SplitPy
def get_arguments_calc_manual(argv=None):
    """
    Get arguments from :class:`~argparse.ArgumentParser` objects.

    This function is used for processing SKS data offline.

    """

    parser = ArgumentParser(
        usage="%(prog)s [arguments] <station database>",
        description="Script to process "
        "and calculate the splitting parameters for a dataset " +
        "that has already been downloaded by split_calc_auto.py. ")

    # General Settings
    parser.add_argument("indb",
                        help="Station Database to process from.",
                        type=str)
    parser.add_argument(
        "--keys",
        action="store",
        type=str,
        dest="stkeys",
        default="",
        help="Specify a comma separated list of station keys " +
        "for which to perform analysis. These must be " +
        "contained within the station database. Partial keys " +
        "will be used to match against those in the " +
        "dictionary. For instance, providing IU will match " +
        "with all stations in the IU network [Default " +
        "processes all stations in the database]")
    parser.add_argument("-v",
                        "-V",
                        "--verbose",
                        action="store_true",
                        dest="verb",
                        default=False,
                        help="Specify to increase verbosity.")

    # Constants Settings
    ConstGroup = parser.add_argument_group(
        title='Parameter Settings',
        description="Miscellaneous default values and settings")
    ConstGroup.add_argument(
        "--window",
        action="store",
        type=float,
        dest="dts",
        default=120.,
        help="Specify time window length before and after the SKS "
        "arrival. The total window length is 2*dts (sec). [Default 120]")
    ConstGroup.add_argument(
        "--max-delay",
        action="store",
        type=float,
        dest="maxdt",
        default=4.,
        help="Specify the maximum delay time. [Default 4 s]")
    ConstGroup.add_argument("--time-increment",
                            action="store",
                            type=float,
                            dest="ddt",
                            default=0.1,
                            help="Specify the time increment. [Default 0.1 s]")
    ConstGroup.add_argument("--angle-increment",
                            action="store",
                            type=float,
                            dest="dphi",
                            default=1.,
                            help="Specify the angle increment. [Default 1 d]")
    ConstGroup.add_argument(
        "--transverse-SNR",
        action="store",
        type=float,
        dest="snrTlim",
        default=1.,
        help="Specify the minimum SNR Threshold for the Transverse " +
        "component to be considered Non-Null. [Default 1.]")

    # Event Selection Criteria
    EventGroup = parser.add_argument_group(
        title="Event Settings",
        description="Settings associated with " +
        "refining the events to include in matching station pairs")
    EventGroup.add_argument(
        "--start",
        action="store",
        type=str,
        dest="startT",
        default="",
        help="Specify a UTCDateTime compatible string representing the " +
        "start time for the event search. This will override any station " +
        "start times. [Default more recent start date for each station pair]")
    EventGroup.add_argument(
        "--end",
        action="store",
        type=str,
        dest="endT",
        default="",
        help="Specify a UTCDateTime compatible string representing the " +
        "end time for the event search. This will override any station " +
        "end times. [Default older end date for each pair of stations]")
    EventGroup.add_argument(
        "--reverse-order",
        "-R",
        action="store_true",
        dest="reverse",
        default=False,
        help="Reverse order of events. Default behaviour starts at oldest " +
        "event and works towards most recent. Specify reverse order and " +
        "instead the program will start with the most recent events and " +
        "work towards older")

    args = parser.parse_args(argv)

    # Check inputs
    if not exist(args.indb):
        parser.error("Input file " + args.indb + " does not exist")

    # create station key list
    if len(args.stkeys) > 0:
        args.stkeys = args.stkeys.split(',')

    # construct start time
    if len(args.startT) > 0:
        try:
            args.startT = UTCDateTime(args.startT)
        except:
            parser.error("Cannot construct UTCDateTime from start time: " +
                         args.startT)
    else:
        args.startT = None

    # construct end time
    if len(args.endT) > 0:
        try:
            args.endT = UTCDateTime(args.endT)
        except:
            parser.error("Cannot construct UTCDateTime from end time: " +
                         args.endT)
    else:
        args.endT = None

    return args
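The start/end handling above works because UTCDateTime accepts a wide range of ISO-style strings and raises on anything it cannot parse. A minimal standalone sketch of that pattern (the strings are illustrative only):

from obspy import UTCDateTime

for s in ["2015-01-01", "2015-01-01T12:30:00.5", "not-a-date"]:
    try:
        # UTCDateTime raises on strings it cannot interpret
        print(s, "->", UTCDateTime(s).isoformat())
    except Exception:
        print(s, "-> cannot construct UTCDateTime")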
Code example #27
File: knet.py  Project: Brtle/obspy
def _read_knet_hdr(hdrlines, convert_stnm=False, **kwargs):
    """
    Read the header values into a dictionary.

    :param hdrlines: List of the header lines of a K-NET/KiK-net ASCII file
    :type hdrlines: list
    :param convert_stnm: For station names with 6 letters write the last two
        letters of the station code to the 'location' field
    :type convert_stnm: bool
    """
    hdrdict = {'knet': {}}
    hdrnames = ['Origin Time', 'Lat.', 'Long.', 'Depth. (km)', 'Mag.',
                'Station Code', 'Station Lat.', 'Station Long.',
                'Station Height(m)', 'Record Time', 'Sampling Freq(Hz)',
                'Duration Time(s)', 'Dir.', 'Scale Factor', 'Max. Acc. (gal)',
                'Last Correction', 'Memo.']
    _i = 0
    # Event information
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    dt = flds[2] + ' ' + flds[3]
    dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
    # All times are in Japanese standard time which is 9 hours ahead of UTC
    dt -= 9 * 3600.
    hdrdict['knet']['evot'] = dt

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    lat = float(flds[1])
    hdrdict['knet']['evla'] = lat

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    lon = float(flds[1])
    hdrdict['knet']['evlo'] = lon

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    dp = float(flds[2])
    hdrdict['knet']['evdp'] = dp

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    mag = float(flds[1])
    hdrdict['knet']['mag'] = mag

    # Station information
    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    # K-NET and KiK-Net station names can be more than 5 characters long
    # which will cause the station name to be truncated when writing the
    # trace as miniSEED; if convert_stnm is enabled, the last two
    # letters of the station code are written to the 'location' field
    stnm = flds[2]
    location = ''
    if convert_stnm and len(stnm) > 5:
        location = stnm[-2:]
        stnm = stnm[:-2]
    if len(stnm) > 7:
        raise KNETException(
            "Station name can't be more than 7 characters long!")
    hdrdict['station'] = stnm
    hdrdict['location'] = location

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    hdrdict['knet']['stla'] = float(flds[2])

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    hdrdict['knet']['stlo'] = float(flds[2])

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    hdrdict['knet']['stel'] = float(flds[2])

    # Data information
    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    dt = flds[2] + ' ' + flds[3]
    # A 15 s delay is added to the record time by the
    # K-NET and KiK-Net data logger
    dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S') - 15.0
    # All times are in Japanese standard time which is 9 hours ahead of UTC
    dt -= 9 * 3600.
    hdrdict['starttime'] = dt

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    freqstr = flds[2]
    m = re.search('[0-9]*', freqstr)
    freq = int(m.group())
    hdrdict['sampling_rate'] = freq

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    hdrdict['knet']['duration'] = float(flds[2])

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    channel = flds[1].replace('-', '')
    kiknetcomps = {'1': 'NS1', '2': 'EW1', '3': 'UD1',
                   '4': 'NS2', '5': 'EW2', '6': 'UD2'}
    if channel.strip() in kiknetcomps.keys():  # kiknet directions are 1-6
        channel = kiknetcomps[channel.strip()]
    hdrdict['channel'] = channel

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    eqn = flds[2]
    num, denom = eqn.split('/')
    num = float(re.search('[0-9]*', num).group())
    denom = float(denom)
    # convert the calibration from gal to m/s^2
    hdrdict['calib'] = 0.01 * num / denom

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    acc = float(flds[3])
    hdrdict['knet']['accmax'] = acc

    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    dt = flds[2] + ' ' + flds[3]
    dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
    # All times are in Japanese standard time which is 9 hours ahead of UTC
    dt -= 9 * 3600.
    hdrdict['knet']['last correction'] = dt

    # The comment ('Memo') field is optional
    _i += 1
    flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
    if len(flds) > 1:
        hdrdict['knet']['comment'] = ' '.join(flds[1:])

    if len(hdrlines) != _i + 1:
        raise KNETException("Expected %d header lines but got %d"
                            % (_i + 1, len(hdrlines)))
    return hdrdict
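All header times in a K-NET/KiK-net file are Japan Standard Time (UTC+9), and the record time additionally carries the 15 s data-logger delay removed above. The same conversion in isolation (the timestamp is illustrative only):

from obspy import UTCDateTime

# parse a JST header time and shift it to UTC
jst = UTCDateTime.strptime('2011/03/11 14:46:00', '%Y/%m/%d %H:%M:%S')
utc = jst - 9 * 3600
# for the 'Record Time' field, also remove the 15 s logger delay
record_start = utc - 15.0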
Code example #28
# ########################## INPUT
req_client = "RESIF"
starttime = None
endtime = None
network = "YV"
station = "*"
location = '*'
channel = '*H*'

file_name = 'list_stas_created.txt'
# ########################## END INPUT

client = Client(req_client)
if starttime:
    starttime = UTCDateTime(starttime)
if endtime:
    endtime = UTCDateTime(endtime)
inv = client.get_stations(network=network,
                          station=station,
                          location=location,
                          channel=channel,
                          starttime=starttime,
                          endtime=endtime,
                          level='channel')
content = inv.get_contents()
chans = list(set(content['channels']))
chans.sort()

net_inv = inv.networks[0]
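The snippet stops after collecting the sorted channel list; a plausible continuation, assuming the intent is simply to dump that list to the file named in the INPUT block (one SEED id per line is an assumption):

with open(file_name, 'w') as f:
    for cha in chans:
        # each entry is a SEED id of the form NET.STA.LOC.CHA
        f.write(cha + '\n')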
Code example #29
 def removal_date(self, value):
     if value is None or isinstance(value, UTCDateTime):
         self._removal_date = value
         return
     self._removal_date = UTCDateTime(value)
Code example #30
    def getWaveform(self,
                    network,
                    station,
                    location=None,
                    channel=None,
                    starttime=None,
                    endtime=None,
                    apply_filter=None,
                    getPAZ=False,
                    getCoordinates=False,
                    metadata_timecheck=True,
                    **kwargs):
        """
        Gets a ObsPy Stream object.

        :type network: str
        :param network: Network code, e.g. ``'BW'``.
        :type station: str
        :param station: Station code, e.g. ``'MANZ'``.
        :type location: str
        :param location: Location code, e.g. ``'00'``.
        :type channel: str
        :param channel: Channel code, supporting wildcard for component,
            e.g. ``'EHE'`` or ``'EH*'``.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Start date and time.
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: End date and time.
        :type apply_filter: bool, optional
        :param apply_filter: Apply filter (default is ``False``).
        :type getPAZ: bool, optional
        :param getPAZ: Fetch PAZ information and append to
            :class:`~obspy.core.trace.Stats` of all fetched traces. This
            considerably slows down the request (default is ``False``).
        :type getCoordinates: bool, optional
        :param getCoordinates: Fetch coordinate information and append to
            :class:`~obspy.core.trace.Stats` of all fetched traces. This
            considerably slows down the request (default is ``False``).
        :type metadata_timecheck: bool, optional
        :param metadata_timecheck: For ``getPAZ`` and ``getCoordinates`` check
            if metadata information is changing from start to end time. Raises
            an Exception if this is the case. This can be deactivated to save
            time.
        :rtype: :class:`~obspy.core.stream.Stream`
        :return: A ObsPy Stream object.
        """
        # NOTHING goes ABOVE this line!
        # append all args to kwargs, thus having everything in one dictionary
        for key, value in locals().items():
            if key not in ["self", "kwargs"]:
                kwargs[key] = value

        # allow time strings in arguments
        for time_ in ["starttime", "endtime"]:
            if isinstance(kwargs[time_], (str, native_str)):
                kwargs[time_] = UTCDateTime(kwargs[time_])

        trim_start = kwargs['starttime']
        trim_end = kwargs['endtime']
        # we expand the requested timespan on both ends by two samples in
        # order to be able to make use of the nearest_sample option of
        # stream.trim(). (see trim() and tickets #95 and #105)
        # only possible if a channel is specified otherwise delta = 0
        delta = 2 * guessDelta(kwargs['channel'])
        kwargs['starttime'] = trim_start - delta
        kwargs['endtime'] = trim_end + delta

        url = '/seismology/waveform/getWaveform'
        data = self.client._fetch(url, **kwargs)
        if not data:
            raise Exception("No waveform data available")
        # unpickle
        stream = _unpickle(data)
        if len(stream) == 0:
            raise Exception("No waveform data available")
        stream._cleanup()

        # trimming needs to be done only if we extend the datetime above
        if channel:
            stream.trim(trim_start, trim_end)
        if getPAZ:
            for tr in stream:
                paz = self.client.station.getPAZ(seed_id=tr.id,
                                                 datetime=starttime)
                if metadata_timecheck:
                    paz_check = self.client.station.getPAZ(seed_id=tr.id,
                                                           datetime=endtime)
                    if paz != paz_check:
                        msg = "PAZ information changing from start time to" + \
                              " end time."
                        raise Exception(msg)
                tr.stats['paz'] = paz

        if getCoordinates:
            coords = self.client.station.getCoordinates(network=network,
                                                        station=station,
                                                        location=location,
                                                        datetime=starttime)
            if metadata_timecheck:
                coords_check = self.client.station.getCoordinates(
                    network=network,
                    station=station,
                    location=location,
                    datetime=endtime)
                if coords != coords_check:
                    msg = "Coordinate information changing from start " + \
                          "time to end time."
                    raise Exception(msg)
            for tr in stream:
                tr.stats['coordinates'] = coords.copy()
        return stream
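The padding-and-retrim logic above is a common way to let Stream.trim() snap to the nearest sample; the same idea in isolation, using ObsPy's bundled example stream:

from obspy import read

st = read()                               # ObsPy example stream
t1 = st[0].stats.starttime + 1.0
t2 = t1 + 5.0
delta = 2 * st[0].stats.delta             # two extra samples on each side
padded = st.copy()
padded.trim(t1 - delta, t2 + delta)       # request a padded window
padded.trim(t1, t2, nearest_sample=True)  # snap back to the requested span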
Code example #31
 def installation_date(self, value):
     if value is None or isinstance(value, UTCDateTime):
         self._installation_date = value
         return
     self._installation_date = UTCDateTime(value)
Code example #32
    def getPAZ(self, seed_id, datetime):
        """
        Get PAZ for a station at given time span. Gain is the A0 normalization
        constant for the poles and zeros.

        :type seed_id: str
        :param seed_id: SEED or channel id, e.g. ``"BW.RJOB..EHZ"`` or
            ``"EHE"``.
        :type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param datetime: Time for which the PAZ is requested,
            e.g. ``'2010-01-01 12:00:00'``.
        :rtype: dict
        :return: Dictionary containing zeros, poles, gain and sensitivity.

        .. rubric:: Example

        >>> c = Client(timeout=2)
        >>> paz = c.station.getPAZ('BW.MANZ..EHZ', '20090707')
        >>> paz['zeros']
        [0j, 0j]
        >>> len(paz['poles'])
        5
        >>> print(paz['poles'][0])
        (-0.037004+0.037016j)
        >>> paz['gain']
        60077000.0
        >>> paz['sensitivity']
        2516800000.0
        """
        # try to read PAZ from previously obtained XSEED data
        for res in self.client.xml_seeds.get(seed_id, []):
            parser = Parser(res)
            try:
                paz = parser.getPAZ(seed_id=seed_id,
                                    datetime=UTCDateTime(datetime))
                return paz
            except:
                continue
        network, station, location, channel = seed_id.split(".")
        # request station information
        station_list = self.getList(network=network,
                                    station=station,
                                    datetime=datetime)
        if not station_list:
            return {}
        # don't allow wild cards
        for wildcard in ['*', '?']:
            if wildcard in seed_id:
                msg = "Wildcards in seed_id are not allowed."
                raise ValueError(msg)

        if len(station_list) > 1:
            warnings.warn("Received more than one XSEED file. Using first.")

        xml_doc = station_list[0]
        res = self.client.station.getResource(xml_doc['resource_name'])
        reslist = self.client.xml_seeds.setdefault(seed_id, [])
        if res not in reslist:
            reslist.append(res)
        parser = Parser(res)
        paz = parser.getPAZ(seed_id=seed_id, datetime=UTCDateTime(datetime))
        return paz
Code example #33
File: bayescat.py  Project: nimanshr/bayescat
def main(args):
    eventid = args.id
    radius = args.radius
    #does the bayesloc folder exist?
    if not os.path.isdir(BAYESDIR):
        print FOLDER_ERROR
        sys.exit(1)
    bayesbin = os.path.join(BAYESDIR,'bin',BAYESBIN)
    ttimes = glob.glob(os.path.join(BAYESDIR,'ttimes','ak135.*'))
    if not os.path.isfile(bayesbin):
        print FOLDER_ERROR
        sys.exit(1)
    if not len(ttimes):
        print FOLDER_ERROR
        sys.exit(1)
    bayesdb = os.path.join(BAYESDIR,BAYESDB)
    # if startOver and os.path.isfile(bayesdb):
    #     os.remove(bayesdb)
    #does the database exist - if not, create it
    if not os.path.isfile(bayesdb):
        db = sqlite3.connect(bayesdb)
        cursor = db.cursor()
        createTables(db,cursor)
    else:
        db = sqlite3.connect(bayesdb)
        cursor = db.cursor()

    #Delete selected list of events
    if args.delete:
        nevents = deleteEvents(db,cursor,args.delete)
        print '%i events deleted from the database.' % nevents
        sys.exit(0)
        
    #Return some stats about the current database
    if args.stats:
        nevents,nstations,narrivals = getStats(cursor)
        print 'Your database contains information about:'
        print '\t%i events' % nevents
        print '\t%i stations' % nstations
        print '\t%i picks' % narrivals
        sys.exit(0)
        
    eventinfo = getPhaseData(eventid=eventid)
    if not len(eventinfo):
        print 'Could not find event %s in ComCat.  Returning.' % eventid
        sys.exit(1)

    #get the information about the input event
    eventinfo = eventinfo[0]
    eventlat = eventinfo.origins[0]['lat']
    eventlon = eventinfo.origins[0]['lon']
    eventtime = eventinfo.origins[0]['time']
    if eventtime < args.begindate or eventtime > args.enddate:
        fmt = 'Event %s (%s) is outside the time bounds you specified. %s to %s.  Exiting.' 
        print fmt % (eventinfo.eventcode,eventtime,args.begindate,args.enddate)
        sys.exit(1)

    tnow = datetime.utcnow()
    eventfolder = os.path.join(BAYESDIR,'events',eventid)
    if not os.path.isdir(eventfolder):
        os.makedirs(eventfolder)

    # eventlist1 = getEventData(radius=(eventlat,eventlon,0,radius),
    #                          starttime=args.begindate,
    #                          endtime=args.enddate,catalog='pde')
    eventlist = getEventData(radius=(eventlat,eventlon,0,radius),
                              starttime=args.begindate,
                              endtime=args.enddate,catalog='us')
    #eventlist = eventlist1 + eventlist2

    if args.count:
        fmt = 'There are %i events inside %.1f km radius around event %s (%.4f,%.4f)'
        print fmt % (len(eventlist),radius,eventid,eventlat,eventlon)
        sys.exit(0)
    
    #check to see if event has already been located - if so, stop, unless we're being forced
    if not args.force:
        sql = 'SELECT id,code,rlat,rlon,rdepth,rtime FROM event WHERE code="%s"' % eventid
        cursor.execute(sql)
        row = cursor.fetchone()
        if row is not None and row[2] is not None:
            print 'Event %s is already in the database.  Stopping.' % eventid
            sys.exit(0)
    
    priors = getEventPriors(eventlist,cursor)
    stations,arrivals,newevents = getProcessedData(eventlist,db,cursor,ndays=NWEEKS*7)
    fmt = 'In database: %i stations, %i arrivals.  %i events not in db.'
    #print fmt % (len(stations),len(arrivals),len(newevents))
    missing_stations = []
    for event in newevents:
        phasedata = getPhaseData(eventid=event)
        if phasedata is None:
            continue
        if not len(phasedata[0].magnitudes):
            continue
        newstations,newarrivals,ms = insertPhaseData(phasedata[0],db,cursor)
        stations = dict(stations.items() + newstations.items())
        arrivals += newarrivals
        missing_stations += ms

    print 'After searching online:'
    fmt = 'In database: %i stations, %i arrivals.  %i missing stations.'
    print fmt % (len(stations),len(arrivals),len(missing_stations))
    stafile = 'station.dat'
    stationfile = os.path.join(eventfolder,stafile)
    f = open(stationfile,'wt')
    f.write('sta_id lat lon elev\n')
    for stationcode,stationvals in stations.iteritems():
        slat,slon,elev = stationvals
        f.write('%s %.4f %.4f %.3f\n' % (stationcode,slat,slon,elev))
    f.close()

    arrfile = 'arrival.dat'
    arrivalfile = os.path.join(eventfolder,arrfile)
    f = open(arrivalfile,'wt')
    f.write('ev_id sta_id phase time\n')
    for arrival in arrivals:
        eid,scode,phase,time = arrival
        f.write('%i %s %s %.3f\n' % (eid,scode,phase,time))
    f.close()

    prifile = 'prior.dat' #??
    priorfile = os.path.join(eventfolder,prifile)
    f = open(priorfile,'wt')
    f.write('ev_id lat_mean lon_mean dist_sd depth_mean depth_sd time_mean time_sd\n')
    for prior in priors:
        evid,plat,plon,pdepth,ptime = prior
        f.write('%i %.4f %.4f 0.0 %.1f 0.0 %.3f 0.0\n' % (evid,plat,plon,pdepth,ptime))
    f.close()

    #write the config file
    configfile = os.path.join(eventfolder,'bayesloc.cfg')
    config = CONFIG.replace('BAYESLOC',BAYESLOC)
    config = config.replace('EVENTFOLDER',eventfolder)
    fcfg = open(configfile,'wt')
    fcfg.write(config)
    fcfg.close()

    #Run the BayesLoc program
    #change to the eventfolder
    cwd = os.getcwd()
    os.chdir(eventfolder)
    bayesbin = os.path.join(BAYESLOC,'bin','bayesloc')
    cmd = '%s %s' % (bayesbin,configfile)
    print 'Running command %s...' % cmd
    t1 = datetime.now()
    # process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    # for c in iter(lambda: process.stdout.read(1), ''):
    #     sys.stderr.write(c)
    res,stdout,stderr = getCommandOutput(cmd)
    t2 = datetime.now()
    if not res:
        print 'BayesLoc command "%s" failed.  \n%s\n%s.' % (cmd,stdout,stderr)
        sys.exit(1)
    else:
        dt = ((t2-t1).seconds)/60.0
        print 'BayesLoc command was successful - took %.1f minutes.' % dt
    os.chdir(cwd)

    resultfile = os.path.join(eventfolder,'output','origins_ned_stats.out')
    f = open(resultfile,'rt')
    f.readline()
    eventlist = []
    fieldlist = ['lat','lon','depth','time','rlat','rlon','rdepth','rtime','mag','nevents']
    nevents = len(priors) + len(newevents)
    for line in f.readlines():
        parts = line.split()
        eid = int(parts[0])
        lat = float(parts[1])
        lon = float(parts[2])
        depth = float(parts[3])
        time = UTCDateTime(float(parts[4])).datetime
        efmt = 'UPDATE event set rlat=%.4f,rlon=%.4f,rdepth=%.1f,rtime="%s",nevents=%i WHERE id=%i'
        equery = efmt % (lat,lon,depth,time,nevents,eid)
        cursor.execute(equery)
        db.commit()
        query = 'SELECT %s FROM event WHERE id=%i' % (','.join(fieldlist),eid)
        cursor.execute(query)
        row = cursor.fetchone()
        eventlist.append(dict(zip(fieldlist,row)))
    f.close()

    #make a map of all the relocated events
    fname = makeMap(eventlist,eventlat,eventlon,eventfolder)
    print 'Relocated events: %s' % fname
    
    #tell the user what happened with the relocation
    fmt = 'SELECT lat,lon,depth,time,rlat,rlon,rdepth,rtime,nevents FROM event WHERE code="%s"'
    query = fmt % (eventid)
    cursor.execute(query)
    row = cursor.fetchone()
    lat,lon,depth,time,rlat,rlon,rdepth,rtime,nevents = row
    time = UTCDateTime(time).datetime
    rtime = UTCDateTime(rtime).datetime
    if rtime >= time:
        dt = (rtime-time).seconds + ((rtime-time).microseconds)/float(1e6)
    else:
        dt = (time-rtime).seconds + ((time-rtime).microseconds)/float(1e6)
    dd,az1,az2 = gps2DistAzimuth(lat,lon,rlat,rlon)
    dd /= 1000.0
    print 'Event %s was relocated using %i events.' % (eventid,nevents)
    print 'Starting:  %s (%.4f,%.4f) %.1f km' % (time.strftime('%Y-%m-%d %H:%M:%S'),lat,lon,depth)
    print 'Relocated: %s (%.4f,%.4f) %.1f km' % (rtime.strftime('%Y-%m-%d %H:%M:%S'),rlat,rlon,rdepth)
    print '%.1f km (%.1f degrees), %.1f seconds' % (dd,az1,dt)
    cursor.close()
    db.close()
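The relocation bookkeeping converts between epoch floats, UTCDateTime objects and plain datetime objects several times; a short sketch of those conversions (the epoch value is illustrative only):

from obspy import UTCDateTime

epoch = 1331479576.0                      # seconds since 1970, as in the BayesLoc output
t = UTCDateTime(epoch)                    # float -> UTCDateTime
dt = t.datetime                           # UTCDateTime -> datetime for strftime/DB storage
t2 = UTCDateTime(dt.strftime('%Y-%m-%d %H:%M:%S'))  # string -> UTCDateTime again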
Code example #34
File: make_martin_noise.py  Project: cjhopp/scripts
from obspy import read, read_events, read_inventory, Catalog, Stream, Trace, UTCDateTime
from surf_seis.vibbox import vibbox_read
from glob import glob

vbboxes = glob(
    '/media/chet/data/chet-collab/wavs/test_vbox_raw/vbox_2018051714*')
st_raw = Stream()
for vb in vbboxes:
    st_raw += vibbox_read(vb).select(station='OT16').copy()
st_interp = st_raw.copy()
# Fill gaps via interpolation
st_interp.merge(fill_value='interpolate')
# Demean each trace
for tr in st_interp:
    tr.detrend(type='demean')
# Traces too large to write, break into 10-minute chunks
start = UTCDateTime(2018, 5, 17, 14)
for i in range(6):
    start_slice = start + (600 * i)
    end_slice = start_slice + 600.
    for tr in st_interp:
        nm = '{}.{}..{}.{}.mseed'.format(tr.stats.network, tr.stats.station,
                                         tr.stats.channel, start_slice)
        tr.slice(starttime=start_slice, endtime=end_slice).write(nm)
Code example #35
File: arguments.py  Project: wbm06/SplitPy
def get_arguments_calc_auto(argv=None):
    """
    Get Options from :class:`~argparse.ArgumentParser` objects.

    This function is used for data processing on-the-fly (requires web connection)

    """

    parser = ArgumentParser(
        usage="%(prog)s [arguments] <station database>",
        description="Script wrapping "
        "together the python-based implementation of SplitLab by " +
        "Wustefeld and others. This version " +
        "requests data on the fly for a given date range. Data is " +
        "requested from the internet using " +
        "the client services framework or from data provided on a " +
        "local disk. The stations are processed " +
        "one by one with the SKS Splitting parameters measured " +
        "individually using both the " +
        "Rotation-Correlation (RC) and Silver & Chan (SC) methods.")
    parser.add_argument("indb",
                        help="Station Database to process from.",
                        type=str)
    parser.add_argument(
        "--keys",
        action="store",
        type=str,
        dest="stkeys",
        default="",
        help="Specify a comma separated list of station keys for " +
        "which to perform the analysis. These must be " +
        "contained within the station database. Partial keys " +
        "will be used to match against those in the " +
        "dictionary. For instance, providing IU will match with " +
        "all stations in the IU network [Default processes " +
        "all stations in the database]")
    parser.add_argument("-v",
                        "-V",
                        "--verbose",
                        action="store_true",
                        dest="verb",
                        default=False,
                        help="Specify to increase verbosity.")
    parser.add_argument(
        "-O",
        "--overwrite",
        action="store_true",
        dest="ovr",
        default=False,
        help="Force the overwriting of pre-existing Split results. " +
        "Default behaviour prompts for those that " +
        "already exist. Selecting overwrite and skip (ie, both flags) " +
        "negate each other, and both are set to " +
        "false (every repeat is prompted). [Default False]")
    parser.add_argument(
        "-K",
        "--skip-existing",
        action="store_true",
        dest="skip",
        default=False,
        help="Skip any event for which existing splitting results are " +
        "saved to disk. Default behaviour prompts for " +
        "each event. Selecting skip and overwrite (ie, both flags) " +
        "negate each other, and both are set to " +
        "False (every repeat is prompted). [Default False]")
    parser.add_argument(
        "-C",
        "--calc",
        action="store_true",
        dest="calc",
        default=False,
        help="Analyze data for shear-wave splitting. [Default saves data " +
        "to folders for subsequent analysis]")
    parser.add_argument(
        "-P",
        "--plot-diagnostic",
        action="store_true",
        dest="diagplot",
        default=False,
        help="Plot diagnostic window at end of process. [Default False]")
    parser.add_argument(
        "-R",
        "--recalc",
        action="store_true",
        dest="recalc",
        default=False,
        help="Re-calculate estimates and overwrite existing splitting " +
        "results without re-downloading data. [Default False]")

    # Server Settings
    ServerGroup = parser.add_argument_group(
        title="Server Settings",
        description="Settings associated with which " +
        "datacenter to log into.")
    ServerGroup.add_argument(
        "-S",
        "--Server",
        action="store",
        type=str,
        dest="Server",
        default="IRIS",
        help="Specify the server to connect to. Options include: " +
        "BGR, ETH, GEONET, GFZ, INGV, IPGP, IRIS, KOERI, LMU, NCEDC, " +
        "NEIP, NERIES, ODC, ORFEUS, RESIF, SCEDC, USGS, USP. [Default IRIS]")
    ServerGroup.add_argument(
        "-U",
        "--User-Auth",
        action="store",
        type=str,
        dest="UserAuth",
        default="",
        help="Enter your IRIS Authentification Username and Password " +
        "(--User-Auth='username:authpassword') to access and download " +
        "restricted data. [Default no user and password]")

    # Database Settings
    DataGroup = parser.add_argument_group(
        title="Local Data Settings",
        description="Settings associated with defining and using a " +
        "local data base of pre-downloaded day-long SAC files.")
    DataGroup.add_argument(
        "--local-data",
        action="store",
        type=str,
        dest="localdata",
        default=None,
        help="Specify a comma separated list of paths containing " +
        "day-long sac files of data already downloaded. " +
        "If data exists for a seismogram is already present on " +
        "disk, it is selected preferentially over downloading " +
        "the data using the Client interface")
    DataGroup.add_argument(
        "--no-data-zero",
        action="store_true",
        dest="ndval",
        default=False,
        help="Specify to force missing data to be set as zero, rather " +
        "than default behaviour which sets to nan.")
    DataGroup.add_argument(
        "--no-local-net",
        action="store_false",
        dest="useNet",
        default=True,
        help="Specify to prevent using the Network code in the " +
        "search for local data (sometimes for CN stations " +
        "the dictionary name for a station may disagree with that " +
        "in the filename. [Default Network used]")

    # Constants Settings
    ConstGroup = parser.add_argument_group(
        title='Parameter Settings',
        description="Miscellaneous default values and settings")
    ConstGroup.add_argument(
        "--sampling-rate",
        action="store",
        type=float,
        dest="new_sampling_rate",
        default=10.,
        help="Specify new sampling rate in Hz. [Default 10.]")
    ConstGroup.add_argument(
        "--min-snr",
        action="store",
        type=float,
        dest="msnr",
        default=5.,
        help="Minimum SNR value calculated on the radial (Q) component " +
        "to proceed with analysis (dB). [Default 5.]")
    ConstGroup.add_argument(
        "--window",
        action="store",
        type=float,
        dest="dts",
        default=120.,
        help="Specify time window length before and after the SKS "
        "arrival. The total window length is 2*dst (sec). [Default 120]")
    ConstGroup.add_argument(
        "--max-delay",
        action="store",
        type=float,
        dest="maxdt",
        default=4.,
        help="Specify the maximum delay time in search (sec). " +
        "[Default 4]")
    ConstGroup.add_argument(
        "--dt-delay",
        action="store",
        type=float,
        dest="ddt",
        default=0.1,
        help="Specify the time delay increment in search (sec). " +
        "[Default 0.1]")
    ConstGroup.add_argument(
        "--dphi",
        action="store",
        type=float,
        dest="dphi",
        default=1.,
        help="Specify the fast angle increment in search (degree). " +
        "[Default 1.]")
    ConstGroup.add_argument(
        "--snrT",
        action="store",
        type=float,
        dest="snrTlim",
        default=1.,
        help="Specify the minimum SNR Threshold for the Transverse " +
        "component to be considered Non-Null. [Default 1.]")
    ConstGroup.add_argument(
        "--fmin",
        action="store",
        type=float,
        dest="fmin",
        default=0.02,
        help="Specify the minimum frequency corner for bandpass " +
        "filter (Hz). [Default 0.02]")
    ConstGroup.add_argument(
        "--fmax",
        action="store",
        type=float,
        dest="fmax",
        default=0.5,
        help="Specify the maximum frequency corner for bandpass " +
        "filter (Hz). [Default 0.5]")

    # Event Selection Criteria
    EventGroup = parser.add_argument_group(
        title="Event Settings",
        description="Settings associated with refining "
        "the events to include in matching station pairs")
    EventGroup.add_argument(
        "--start",
        action="store",
        type=str,
        dest="startT",
        default="",
        help="Specify a UTCDateTime compatible string representing " +
        "the start time for the event search. This will override any " +
        "station start times. [Default start date of each station]")
    EventGroup.add_argument(
        "--end",
        action="store",
        type=str,
        dest="endT",
        default="",
        help="Specify a UTCDateTime compatible string representing " +
        "the end time for the event search. This will override any " +
        "station end times [Default end date of each station]")
    EventGroup.add_argument(
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        help="Reverse order of events. Default behaviour starts at " +
        "oldest event and works towards most recent. " +
        "Specify reverse order and instead the program will start " +
        "with the most recent events and work towards older")
    EventGroup.add_argument(
        "--min-mag",
        action="store",
        type=float,
        dest="minmag",
        default=6.0,
        help="Specify the minimum magnitude of event for which to " +
        "search. [Default 6.0]")
    EventGroup.add_argument(
        "--max-mag",
        action="store",
        type=float,
        dest="maxmag",
        default=None,
        help="Specify the maximum magnitude of event for which to " +
        "search. [Default None, i.e. no limit]")

    # Geometry Settings
    GeomGroup = parser.add_argument_group(
        title="Geometry Settings",
        description="Settings associatd with the "
        "event-station geometries")
    GeomGroup.add_argument(
        "--min-dist",
        action="store",
        type=float,
        dest="mindist",
        default=85.,
        help="Specify the minimum great circle distance (degrees) " +
        "between the station and event. [Default 85]")
    GeomGroup.add_argument(
        "--max-dist",
        action="store",
        type=float,
        dest="maxdist",
        default=120.,
        help="Specify the maximum great circle distance (degrees) " +
        "between the station and event. [Default 120]")
    GeomGroup.add_argument(
        "--phase",
        action="store",
        type=str,
        dest="phase",
        default='SKS',
        help="Specify the phase name to use. Be careful with the distance. " +
        "setting. Options are 'SKS' or 'SKKS'. [Default 'SKS']")

    args = parser.parse_args(argv)

    # Check inputs
    if not exist(args.indb):
        parser.error("Input file " + args.indb + " does not exist")

    # create station key list
    if len(args.stkeys) > 0:
        args.stkeys = args.stkeys.split(',')

    # construct start time
    if len(args.startT) > 0:
        try:
            args.startT = UTCDateTime(args.startT)
        except:
            parser.error("Cannot construct UTCDateTime from start time: " +
                         args.startT)
    else:
        args.startT = None

    # construct end time
    if len(args.endT) > 0:
        try:
            args.endT = UTCDateTime(args.endT)
        except:
            parser.error("Cannot construct UTCDateTime from end time: " +
                         args.endT)
    else:
        args.endT = None

    # Parse User Authentication
    if not len(args.UserAuth) == 0:
        tt = args.UserAuth.split(':')
        if not len(tt) == 2:
            parser.error(
                "Error: Incorrect Username and Password Strings for " +
                "User Authentification")
        else:
            args.UserAuth = tt
    else:
        args.UserAuth = []

    # Check existing file behaviour
    if args.skip and args.ovr:
        args.skip = False
        args.ovr = False

    # Parse Local Data directories
    if args.localdata is not None:
        args.localdata = args.localdata.split(',')
    else:
        args.localdata = []

    # Check NoData Value
    if args.ndval:
        args.ndval = 0.0
    else:
        args.ndval = nan

    # Check distances for selected phase
    if args.phase not in ['SKS', 'SKKS']:
        parser.error("Error: choose between 'SKS' and 'SKKS.")
    if args.phase == 'SKS' or 'SKKS':
        if not args.mindist:
            args.mindist = 85.
        if not args.maxdist:
            args.maxdist = 120.
        if args.mindist < 85. or args.maxdist > 120.:
            parser.error("Distances should be between 85 and 120 deg. for " +
                         "teleseismic 'SKS' and 'SKKS' waves.")

    return args
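A hedged usage sketch of get_arguments_calc_auto; the database filename and station key are placeholders, and the file is assumed to exist on disk (otherwise parser.error() aborts):

args = get_arguments_calc_auto([
    'stations.pkl',                # hypothetical station database file
    '--keys', 'IU.HRV',
    '--start', '2015-01-01',
    '--end', '2016-01-01',
    '--min-mag', '6.5'])
print(args.startT, args.endT, args.minmag)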
Code example #36
File: find_LFEs.py  Project: ArianeDucellier/catalog
def find_LFEs(filename, stations, tbegin, tend, TDUR, filt, \
        freq0, nattempts, waittime, draw=False, type_threshold='MAD', \
        threshold=0.0075):
    """
    Find LFEs with the temporary stations from FAME
    using the templates from Plourde et al. (2015)

    Input:
        type filename = string
        filename = Name of the template
        type stations = list of strings
        stations = name of the stations used for the matched-filter algorithm
        type tbegin = tuple of 6 integers
        tbegin = Time when we begin looking for LFEs
        type tend = tuple of 6 integers
        tend = Time we stop looking for LFEs
        type TDUR = float
        TDUR = Time to add before and after the time window for tapering
        type filt = tuple of floats
        filt = Lower and upper frequencies of the filter
        type freq0 = float
        freq0 = Maximum frequency rate of LFE occurrence
        type nattempts = integer
        nattempts = Number of times we try to download data
        type waittime = positive float
        waittime = Time to wait between two attempts at downloading
        type draw = boolean
        draw = Do we draw a figure of the cross-correlation?
        type type_threshold = string
        type_threshold = 'MAD' or 'Threshold'
        type threshold = float
        threshold = Cross correlation value must be higher than that
    Output:
        None
    """

    # Get the network, channels, and location of the stations
    staloc = pd.read_csv('../data/Ducellier/stations_permanent.txt', \
        sep=r'\s{1,}', header=None, engine='python')
    staloc.columns = ['station', 'network', 'channels', 'location', \
        'server', 'latitude', 'longitude', 'time_on', 'time_off']

    # Create directory to store the LFEs times
    namedir = 'LFEs/' + filename
    if not os.path.exists(namedir):
        os.makedirs(namedir)

    # File to write error messages
    namedir = 'error'
    if not os.path.exists(namedir):
        os.makedirs(namedir)
    errorfile = 'error/' + filename + '.txt'

    # Read the templates
    templates = Stream()
    for station in stations:
        data = pickle.load(open('templates_new/' + filename + \
            '/' + station + '.pkl', 'rb'))
        if (len(data) == 3):
            EW = data[0]
            NS = data[1]
            UD = data[2]
            EW.stats.station = station
            NS.stats.station = station
            EW.stats.channel = 'E'
            NS.stats.channel = 'N'
            templates.append(EW)
            templates.append(NS)
        else:
            UD = data[0]
        UD.stats.station = station
        UD.stats.channel = 'Z'
        templates.append(UD)

    # Begin and end time of analysis
    t1 = UTCDateTime(year=tbegin[0], month=tbegin[1], \
        day=tbegin[2], hour=tbegin[3], minute=tbegin[4], \
        second=tbegin[5])
    t2 = UTCDateTime(year=tend[0], month=tend[1], \
        day=tend[2], hour=tend[3], minute=tend[4], \
        second=tend[5])

    # Read the data
    data = []
    for station in stations:
        # Get station metadata for downloading
        for ir in range(0, len(staloc)):
            if (station == staloc['station'][ir]):
                network = staloc['network'][ir]
                channels = staloc['channels'][ir]
                location = staloc['location'][ir]
                server = staloc['server'][ir]

        # Duration of template
        template = templates.select(station=station, component='Z')[0]
        dt = template.stats.delta
        nt = template.stats.npts
        duration = (nt - 1) * dt   
        Tstart = t1 - TDUR
        Tend = t2 + duration + TDUR
        delta = t2 + duration - t1
        ndata = int(delta / dt) + 1

        # Orientation of template
        # Reference date used for the orientation query: January 1st 2012
        mychannels = channels.split(',')
        mylocation = location
        if (mylocation == '--'):
            mylocation = ''
        response = '../data/response/' + network + '_' + station + '.xml'
        inventory = read_inventory(response, format='STATIONXML')
        reference = []
        for channel in mychannels:
            angle = inventory.get_orientation(network + '.' + \
                station + '.' + mylocation + '.' + channel, \
                UTCDateTime(2012, 1, 1, 0, 0, 0))
            reference.append(angle)

        # First case: we can get the data from IRIS
        if (server == 'IRIS'):
            (D, orientation) = get_from_IRIS(station, network, channels, \
                location, Tstart, Tend, filt, dt, nattempts, waittime, \
                errorfile)
        # Second case: we get the data from NCEDC
        elif (server == 'NCEDC'):
            (D, orientation) = get_from_NCEDC(station, network, channels, \
                location, Tstart, Tend, filt, dt, nattempts, waittime, \
                errorfile)
        else:
            raise ValueError('You can only download data from IRIS and NCEDC')

        # Append data to stream
        if (type(D) == obspy.core.stream.Stream):
            stationdata = fill_data(D, orientation, station, channels, reference)
            if (len(stationdata) > 0):
                for stream in stationdata:
                    data.append(stream)

    # Number of hours of data to analyze
    nhour = int(ceil((t2 - t1) / 3600.0))

    # Create dataframe to store LFE times
    df = pd.DataFrame(columns=['year', 'month', 'day', 'hour', \
        'minute', 'second', 'cc', 'nchannel'])

    # Loop on hours of data
    for hour in range(0, nhour):
        nchannel = 0
        Tstart = t1 + hour * 3600.0
        Tend = t1 + (hour + 1) * 3600.0 + duration
        delta = Tend - Tstart
        ndata = int(delta / dt) + 1

        # Loop on channels
        for channel in range(0, len(data)):
            # Cut the data
            subdata = data[channel]
            subdata = subdata.slice(Tstart, Tend)
            # Check whether we have a complete one-hour-long recording
            if (len(subdata) == 1):
                if (len(subdata[0].data) == ndata):
                    # Get the template
                    station = subdata[0].stats.station
                    component = subdata[0].stats.channel
                    template = templates.select(station=station, \
                        component=component)[0]
                    # Cross correlation
                    cctemp = correlate.optimized(template, subdata[0])
                    if (nchannel > 0):
                        cc = np.vstack((cc, cctemp))
                    else:
                        cc = cctemp
                    nchannel = nchannel + 1
    
        if (nchannel > 0):
   
            # Compute average cross-correlation across channels
            meancc = np.mean(cc, axis=0)
            if (type_threshold == 'MAD'):
                MAD = np.median(np.abs(meancc - np.mean(meancc)))
                index = np.where(meancc >= threshold * MAD)
            elif (type_threshold == 'Threshold'):
                index = np.where(meancc >= threshold)
            else:
                raise ValueError('Type of threshold must be MAD or Threshold')
            times = np.arange(0.0, np.shape(meancc)[0] * dt, dt)

            # Get LFE times
            if np.shape(index)[1] > 0:
                (time, cc) = clean_LFEs(index, times, meancc, dt, freq0)

                # Add LFE times to dataframe
                i0 = len(df.index)
                for i in range(0, len(time)):
                    timeLFE = Tstart + time[i]
                    df.loc[i0 + i] = [int(timeLFE.year), int(timeLFE.month), \
                        int(timeLFE.day), int(timeLFE.hour), \
                        int(timeLFE.minute), timeLFE.second + \
                        timeLFE.microsecond / 1000000.0, cc[i], nchannel]

            # Draw figure
            if (draw == True):
                params = {'xtick.labelsize':16,
                          'ytick.labelsize':16}
                pylab.rcParams.update(params) 
                plt.figure(1, figsize=(20, 8))
                if np.shape(index)[1] > 0:
                    for i in range(0, len(time)):
                        plt.axvline(time[i], linewidth=2, color='grey')
                plt.plot(np.arange(0.0, np.shape(meancc)[0] * dt, \
                    dt), meancc, color='black')
                if (type_threshold == 'MAD'):
                    plt.axhline(threshold * MAD, linewidth=2, color='red', \
                        label = '{:6.2f} * MAD'.format(threshold))
                elif (type_threshold == 'Threshold'):
                    plt.axhline(threshold, linewidth=2, color='red', \
                        label = 'Threshold = {:8.4f}'.format(threshold))
                else:
                    raise ValueError( \
                        'Type of threshold must be MAD or Threshold')
                plt.xlim(0.0, (np.shape(meancc)[0] - 1) * dt)
                plt.xlabel('Time (s)', fontsize=24)
                plt.ylabel('Cross-correlation', fontsize=24)
                plt.title('Average cross-correlation across stations', \
                    fontsize=30)
                plt.legend(loc=2, fontsize=24)
                plt.savefig('LFEs/' + filename + '/' + \
                    '{:04d}{:02d}{:02d}_{:02d}{:02d}{:02d}'.format( \
                    Tstart.year, Tstart.month, Tstart.day, Tstart.hour, \
                    Tstart.minute, Tstart.second) + '.png', format='png')
                plt.close(1)

    # Add to pandas dataframe and save
    namefile = 'LFEs/' + filename + '/catalog.pkl'
    if os.path.exists(namefile):
        df_all = pickle.load(open(namefile, 'rb'))
        df_all = pd.concat([df_all, df], ignore_index=True)
    else:
        df_all = df    
    df_all = df_all.astype(dtype={'year':'int32', 'month':'int32', \
        'day':'int32', 'hour':'int32', 'minute':'int32', \
        'second':'float', 'cc':'float', 'nchannel':'int32'})
    pickle.dump(df_all, open(namefile, 'wb'))
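The detection step treats the threshold either as an absolute cross-correlation value or as a multiple of the median absolute deviation (MAD) of the stacked cross-correlation. A compact sketch of the MAD branch on synthetic data:

import numpy as np

meancc = np.random.randn(3600) * 0.01     # synthetic stacked cross-correlation
threshold = 8.0                           # interpreted here as a MAD multiplier
MAD = np.median(np.abs(meancc - np.mean(meancc)))
index = np.where(meancc >= threshold * MAD)
print('number of detections:', np.shape(index)[1])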
Code example #37
import os, shutil
from obspy import UTCDateTime

# parallel params
pad_dir = '/home/zhouyj/software/PAD'
shutil.copyfile('config_temp.py', os.path.join(pad_dir, 'config.py'))
time_range = '20180515-20180702'
num_workers = 10
out_root = 'output/test'
pick_dir = 'input/picks'
sta_file = 'input/example.sta'

# divide by time
start_date, end_date = [UTCDateTime(date) for date in time_range.split('-')]
dt = (end_date - start_date) / num_workers
for proc_idx in range(num_workers):
    t0 = ''.join(str((start_date + proc_idx * dt).date).split('-'))
    t1 = ''.join(str((start_date + (proc_idx + 1) * dt).date).split('-'))
    time_range = '{}-{}'.format(t0, t1)
    out_pha = '{}/phase_{}.dat'.format(out_root, t0)
    out_ctlg = '{}/catalog_{}.dat'.format(out_root, t0)
    os.system("python {}/run_assoc.py \
        --time_range={} --ppk_dir={} --sta_file={} \
        --out_ctlg={} --out_pha={} &" \
        .format(pad_dir, time_range, pick_dir, sta_file, out_ctlg, out_pha))
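The loop above hands each worker an equal slice of the requested period; a minimal sketch of just that date arithmetic:

from obspy import UTCDateTime

start_date, end_date = UTCDateTime('20180515'), UTCDateTime('20180702')
num_workers = 10
dt = (end_date - start_date) / num_workers    # chunk length in seconds
chunks = [(start_date + i * dt, start_date + (i + 1) * dt)
          for i in range(num_workers)]
print(chunks[0], chunks[-1])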
Code example #38
def run_parallel_generate_ruptures(home,project_name,run_name,fault_name,slab_name,mesh_name,
        load_distances,distances_name,UTM_zone,tMw,model_name,hurst,Ldip,Lstrike,
        num_modes,Nrealizations,rake,buffer_factor,rise_time_depths0,rise_time_depths1,time_epi,max_slip,
        source_time_function,lognormal,slip_standard_deviation,scaling_law,ncpus,force_magnitude,
        force_area,mean_slip_name,hypocenter,slip_tol,force_hypocenter,
        no_random,shypo,use_hypo_fraction,shear_wave_fraction,max_slip_rule,rank,size):
    
    '''
    Depending on user selected flags parse the work out to different functions
    '''
    
    from numpy import load,save,genfromtxt,log10,cos,sin,deg2rad,savetxt,zeros,where
    from time import gmtime, strftime
    from numpy.random import shuffle
    from mudpy import fakequakes
    from obspy import UTCDateTime
    from obspy.taup import TauPyModel
    import warnings

    #I don't condone it but this cleans up the warnings
    warnings.filterwarnings("ignore")
    
    # Fix input formats
    rank=int(rank)
    size=int(size)
    if time_epi=='None':
        time_epi=None
    else:
        time_epi=UTCDateTime(time_epi)
    rise_time_depths=[rise_time_depths0,rise_time_depths1]
    #hypocenter=[hypocenter_lon,hypocenter_lat,hypocenter_dep]
    tMw=tMw.split(',')
    target_Mw=zeros(len(tMw))
    for rMw in range(len(tMw)):
        target_Mw[rMw]=float(tMw[rMw])

    #Should I calculate or load the distances?
    if load_distances==1:  
        Dstrike=load(home+project_name+'/data/distances/'+distances_name+'.strike.npy')
        Ddip=load(home+project_name+'/data/distances/'+distances_name+'.dip.npy')
    else:
        Dstrike,Ddip=fakequakes.subfault_distances_3D(home,project_name,fault_name,slab_name,UTM_zone)
        save(home+project_name+'/data/distances/'+distances_name+'.strike.npy',Dstrike)
        save(home+project_name+'/data/distances/'+distances_name+'.dip.npy',Ddip)
    

    #Read fault and prepare output variable
    whole_fault=genfromtxt(home+project_name+'/data/model_info/'+fault_name)
    
    #Get structure model
    vel_mod_file=home+project_name+'/structure/'+model_name
    
    #Get TauPyModel
    velmod = TauPyModel(model=home+project_name+'/structure/'+model_name.split('.')[0])

    #Now loop over the number of realizations
    realization=0
    if rank==0:
        print('Generating rupture scenarios')
    for kmag in range(len(target_Mw)):
        if rank==0:
            print('... Calculating ruptures for target magnitude Mw = '+str(target_Mw[kmag]))
        for kfault in range(Nrealizations):
            if kfault%1==0 and rank==0:
                print('... ... working on ruptures '+str(ncpus*realization)+' to ' + str(ncpus*(realization+1)-1) + ' of '+str(Nrealizations*size*len(target_Mw)))
                #print '... ... working on ruptures '+str(ncpus*realization+rank)+' of '+str(Nrealizations*size-1)
            
            #Prepare output
            fault_out=zeros((len(whole_fault),14))
            fault_out[:,0:8]=whole_fault[:,0:8]
            fault_out[:,10:12]=whole_fault[:,8:]   
            
            #Success criterion
            success=False
            while success==False:
                #Select only a subset of the faults based on magnitude scaling
                current_target_Mw=target_Mw[kmag]
                ifaults,hypo_fault,Lmax,Wmax,Leff,Weff=fakequakes.select_faults(whole_fault,Dstrike,Ddip,current_target_Mw,buffer_factor,num_modes,scaling_law,
                                    force_area,no_shallow_epi=False,no_random=no_random,subfault_hypocenter=shypo,use_hypo_fraction=use_hypo_fraction)
                fault_array=whole_fault[ifaults,:]
                Dstrike_selected=Dstrike[ifaults,:][:,ifaults]
                Ddip_selected=Ddip[ifaults,:][:,ifaults]
                
                #Determine correlation lengths from effective length/width Leff and Weff
                if Lstrike=='MB2002': #Use scaling
                    #Ls=10**(-2.43+0.49*target_Mw)
                    Ls=2.0+(1./3)*Leff
                elif Lstrike=='auto':
                    Ls=17.7+0.34*Leff
                else:
                    Ls=Lstrike
                if Ldip=='MB2002': #Use scaling
                    #Ld=10**(-1.79+0.38*target_Mw)
                    Ld=1.0+(1./3)*Weff
                elif Ldip=='auto':
                    Ld=6.8+0.4*Weff
                else:
                    Ld=Ldip
                
                #Get the mean uniform slip for the target magnitude
                if mean_slip_name==None:
                    mean_slip,mu=fakequakes.get_mean_slip(target_Mw[kmag],fault_array,vel_mod_file)
                else:
                    foo,mu=fakequakes.get_mean_slip(target_Mw[kmag],fault_array,vel_mod_file)
                    mean_fault=genfromtxt(mean_slip_name)
                    mean_slip=(mean_fault[:,8]**2+mean_fault[:,9]**2)**0.5
                    
                    #keep only faults that have mean slip inside the fault_array selected faults
                    mean_slip=mean_slip[ifaults]
                    
                    #get the area in those selected faults
                    area=fault_array[:,-2]*fault_array[:,-1]
                    
                    #get the moment in those selected faults
                    moment_on_selected=(area*mu*mean_slip).sum()
                    
                    #target moment
                    target_moment=10**(1.5*target_Mw[kmag]+9.1)
                    
                    #How much do I need to upscale?
                    scale_factor=target_moment/moment_on_selected
                    
                    #rescale the slip
                    mean_slip = mean_slip*scale_factor
                    
                    
                    #Make sure mean_slip has no zero slip faults
                    izero=where(mean_slip==0)[0]
                    mean_slip[izero]=slip_tol
                
                #Get correlation matrix
                C=fakequakes.vonKarman_correlation(Dstrike_selected,Ddip_selected,Ls,Ld,hurst)
                
                # Lognormal or not?
                if lognormal==False:
                    #Get covariance matrix
                    C_nonlog=fakequakes.get_covariance(mean_slip,C,target_Mw[kmag],fault_array,vel_mod_file,slip_standard_deviation) 
                    #Get eigen values and eigenvectors
                    eigenvals,V=fakequakes.get_eigen(C_nonlog)
                    #Generate fake slip pattern
                    rejected=True
                    while rejected==True:
#                        slip_unrectified,success=make_KL_slip(fault_array,num_modes,eigenvals,V,mean_slip,max_slip,lognormal=False,seed=kfault)
                        slip_unrectified,success=fakequakes.make_KL_slip(fault_array,num_modes,eigenvals,V,mean_slip,max_slip,lognormal=False,seed=None)
                        slip,rejected,percent_negative=fakequakes.rectify_slip(slip_unrectified,percent_reject=13)
                        if rejected==True:
                            print('... ... ... negative slip threshold exceeded with %d%% negative slip. Recomputing...' % (percent_negative))
                else:
                    #Get lognormal values
                    C_log,mean_slip_log=fakequakes.get_lognormal(mean_slip,C,target_Mw[kmag],fault_array,vel_mod_file,slip_standard_deviation)               
                    #Get eigen values and eigenvectors
                    eigenvals,V=fakequakes.get_eigen(C_log)
                    #Generate fake slip pattern
#                    slip,success=make_KL_slip(fault_array,num_modes,eigenvals,V,mean_slip_log,max_slip,lognormal=True,seed=kfault)
                    slip,success=fakequakes.make_KL_slip(fault_array,num_modes,eigenvals,V,mean_slip_log,max_slip,lognormal=True,seed=None)
            
                #Slip pattern successfully made, moving on.
                #Rigidities
                foo,mu=fakequakes.get_mean_slip(target_Mw[kmag],whole_fault,vel_mod_file)
                fault_out[:,13]=mu
                
                #Calculate moment and magnitude of fake slip pattern
                M0=sum(slip*fault_out[ifaults,10]*fault_out[ifaults,11]*mu[ifaults])
                Mw=(2./3)*(log10(M0)-9.1)
                
                #Check max_slip_rule
                if max_slip_rule==True:
                    
                    max_slip_from_rule=10**(-4.94+0.71*Mw) #From Allen & Hayes, 2017
                    max_slip_tolerance = 3
                    
                    if slip.max() > max_slip_tolerance*max_slip_from_rule:
                        success = False
                        print('... ... ... max slip condition violated by max_slip_rule, recalculating...')
                
                #Force to target magnitude
                if force_magnitude==True:
                    M0_target=10**(1.5*target_Mw[kmag]+9.1)
                    M0_ratio=M0_target/M0
                    #Multiply slip by ratio
                    slip=slip*M0_ratio
                    #Recalculate
                    M0=sum(slip*fault_out[ifaults,10]*fault_out[ifaults,11]*mu[ifaults])
                    Mw=(2./3)*(log10(M0)-9.1)
                    
                #check max_slip again
                if slip.max() > max_slip:
                    success=False
                    print('... ... ... max slip condition violated due to force_magnitude=True, recalculating...')
            
            
            #Get stochastic rake vector
            stoc_rake=fakequakes.get_stochastic_rake(rake,len(slip))
            
            #Place slip values in output variable
            fault_out[ifaults,8]=slip*cos(deg2rad(stoc_rake))
            fault_out[ifaults,9]=slip*sin(deg2rad(stoc_rake))
            
            #Move hypocenter to somewhere with a substantial fraction of peak slip
#            slip_fraction=0.25
#            islip=where(slip>slip.max()*slip_fraction)[0]
#            shuffle(islip) #randomize
#            hypo_fault=ifaults[islip[0]] #select first from randomized vector
            
            #Calculate and scale rise times
            rise_times=fakequakes.get_rise_times(M0,slip,fault_array,rise_time_depths,stoc_rake)
            
            #Place rise_times in output variable
            fault_out[:,7]=0
            fault_out[ifaults,7]=rise_times
            
            #Calculate rupture onset times
            if force_hypocenter==False: #Use random hypo, otherwise force hypo to user specified
                hypocenter=whole_fault[hypo_fault,1:4]

            t_onset=fakequakes.get_rupture_onset(home,project_name,slip,fault_array,model_name,hypocenter,rise_time_depths,M0,velmod)
            fault_out[:,12]=0
            fault_out[ifaults,12]=t_onset
            
            #Calculate location of moment centroid
            centroid_lon,centroid_lat,centroid_z=fakequakes.get_centroid(fault_out)
            
            #Write to file
            run_number=str(ncpus*realization+rank).rjust(6,'0')
            outfile=home+project_name+'/output/ruptures/'+run_name+'.'+run_number+'.rupt'
            savetxt(outfile,fault_out,fmt='%d\t%10.6f\t%10.6f\t%8.4f\t%7.2f\t%7.2f\t%4.1f\t%5.2f\t%5.2f\t%5.2f\t%10.2f\t%10.2f\t%5.2f\t%.6e',header='No,lon,lat,z(km),strike,dip,rise,dura,ss-slip(m),ds-slip(m),ss_len(m),ds_len(m),rupt_time(s),rigidity(Pa)')
            
            #Write log file
            logfile=home+project_name+'/output/ruptures/'+run_name+'.'+run_number+'.log'
            f=open(logfile,'w')
            f.write('Scenario calculated at '+strftime("%Y-%m-%d %H:%M:%S", gmtime())+' GMT\n')
            f.write('Project name: '+project_name+'\n')
            f.write('Run name: '+run_name+'\n')
            f.write('Run number: '+run_number+'\n')
            f.write('Velocity model: '+model_name+'\n')
            f.write('No. of KL modes: '+str(num_modes)+'\n')
            f.write('Hurst exponent: '+str(hurst)+'\n')
            f.write('Corr. length used Lstrike: %.2f km\n' % Ls)
            f.write('Corr. length used Ldip: %.2f km\n' % Ld)
            f.write('Slip std. dev.: %.3f km\n' % slip_standard_deviation)
            f.write('Maximum length Lmax: %.2f km\n' % Lmax)
            f.write('Maximum width Wmax: %.2f km\n' % Wmax)
            f.write('Effective length Leff: %.2f km\n' % Leff)
            f.write('Effective width Weff: %.2f km\n' % Weff)
            f.write('Target magnitude: Mw %.4f\n' % target_Mw[kmag])
            f.write('Actual magnitude: Mw %.4f\n' % Mw)
            f.write('Hypocenter (lon,lat,z[km]): (%.6f,%.6f,%.2f)\n' %(hypocenter[0],hypocenter[1],hypocenter[2]))
            f.write('Hypocenter time: %s\n' % time_epi)
            f.write('Centroid (lon,lat,z[km]): (%.6f,%.6f,%.2f)\n' %(centroid_lon,centroid_lat,centroid_z))
            f.write('Source time function type: %s' % source_time_function)
            f.close()
            
            realization+=1
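The moment-to-magnitude conversion used above, Mw = (2/3)*(log10(M0) - 9.1), and its inverse (used when force_magnitude rescales the slip) can be checked in isolation. A minimal sketch, assuming only numpy; the function names are illustrative:

import numpy as np

def moment_to_mw(M0):
    # Mw from seismic moment M0 (N*m), as in the code above
    return (2.0 / 3.0) * (np.log10(M0) - 9.1)

def mw_to_moment(Mw):
    # inverse relation used to compute M0_target before rescaling the slip
    return 10 ** (1.5 * Mw + 9.1)

M0 = mw_to_moment(7.5)
print(M0)                # ~2.24e20 N*m
print(moment_to_mw(M0))  # ~7.5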
コード例 #39
0
    def test_real_time_plotting(self):
        """Test the real-time plotter - must be run interactively."""

        seed_list = [
            "NZ.INZ.10.HHZ", "NZ.JCZ.10.HHZ", "NZ.FOZ.11.HHZ", "NZ.MSZ.10.HHZ",
            "NZ.PYZ.10.HHZ", "NZ.DCZ.10.HHZ", "NZ.WVZ.10.HHZ"
        ]
        client = Client("GEONET")
        inv = client.get_stations(network=seed_list[0].split(".")[0],
                                  station=seed_list[0].split(".")[1],
                                  location=seed_list[0].split(".")[2],
                                  channel=seed_list[0].split(".")[3])
        for seed_id in seed_list[1:]:
            net, sta, loc, chan = seed_id.split('.')
            inv += client.get_stations(network=net,
                                       station=sta,
                                       channel=chan,
                                       location=loc)

        template_cat = client.get_events(starttime=UTCDateTime(2020, 3, 25),
                                         endtime=UTCDateTime(2020, 3, 26))
        tribe = Tribe(templates=[
            Template(event=event, name=event.resource_id.id.split("/")[-1])
            for event in template_cat
        ])
        template_names = cycle([t.name for t in tribe])

        buffer_capacity = 1200
        rt_client = RealTimeClient(server_url="link.geonet.org.nz",
                                   buffer_capacity=buffer_capacity)
        for seed_id in seed_list:
            net, station, _, selector = seed_id.split(".")
            rt_client.select_stream(net=net,
                                    station=station,
                                    selector=selector)

        rt_client.background_run()
        while len(rt_client.stream) < 7:
            # Wait until we have some data
            time.sleep(SLEEP_STEP)

        detections = []
        plotter = EQcorrscanPlot(rt_client=rt_client,
                                 plot_length=60,
                                 tribe=tribe,
                                 inventory=inv,
                                 update_interval=1000,
                                 detections=detections)
        plotter.background_run()

        duration = 0
        step = 5
        while duration < MAX_DURATION:
            detections.append(
                Detection(
                    template_name=next(template_names),
                    detect_time=UTCDateTime.now(),
                    no_chans=999,
                    detect_val=999,
                    threshold=999,
                    threshold_type="MAD",
                    threshold_input=999,
                    typeofdet="unreal",
                    event=Event(picks=[
                        Pick(time=UTCDateTime.now(),
                             waveform_id=WaveformStreamID(seed_string=seed_id))
                        for seed_id in seed_list
                    ])))
            time.sleep(step)
            duration += step
        rt_client.background_stop()
コード例 #40
0
ファイル: findpeaks.py プロジェクト: cjhopp/scripts
def find_peaks2_short(arr, thresh, trig_int, debug=0, starttime=False,
                      samp_rate=1.0):
    r"""Function to determine peaks in an array of data above a certain \
    threshold. Uses a mask to remove data below threshold and finds peaks in \
    what is left.

    :type arr: ndarray
    :param arr: 1-D numpy array is required
    :type thresh: float
    :param thresh: The threshold below which data will be considered noise; \
        no peaks will be found below this value.
    :type trig_int: int
    :param trig_int: The minimum difference in samples between triggers; \
        if multiple peaks fall within this window, only the highest is kept.
    :type debug: int
    :param debug: Optional, debug level 0-5
    :type starttime: obspy.UTCDateTime
    :param starttime: Starttime for plotting, only used if debug > 2.
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.

    :return: peaks: Lists of tuples of peak values and locations.
    """
    from scipy import ndimage
    import numpy as np
    from obspy import UTCDateTime
    if not starttime:
        starttime = UTCDateTime(0)
    # Set everything below the threshold to zero
    image = np.copy(arr)
    image = np.abs(image)
    image[image < thresh] = 0
    if len(image[image > thresh]) == 0:
        print('No values over threshold found')
        return []
    if debug > 0:
        print(' '.join(['Found', str(len(image[image > thresh])),
                        'samples above the threshold']))
    initial_peaks = []
    peaks = []
    # Find the peaks
    labeled_image, number_of_objects = ndimage.label(image)
    peak_slices = ndimage.find_objects(labeled_image)
    for peak_slice in peak_slices:
        # print 'Width of peak='+str(peak_slice[0].stop-peak_slice[0].start)
        window = arr[peak_slice[0].start: peak_slice[0].stop]
        initial_peaks.append((max(window),
                              peak_slice[0].start + np.argmax(window)))
    # Sort initial peaks according to amplitude
    peaks_sort = sorted(initial_peaks, key=lambda amplitude: amplitude[0],
                        reverse=True)
    # Debugging
    if debug >= 4:
        for peak in initial_peaks:
            print(peak)
    if initial_peaks:
        peaks.append(peaks_sort[0])  # Definitely take the biggest peak
        if debug > 3:
            print(' '.join(['Added the biggest peak of', str(peaks[0][0]),
                            'at sample', str(peaks[0][1])]))
        if len(initial_peaks) > 1:
            if debug > 3:
                msg = ' '.join(['Multiple peaks found, checking',
                                'them now to see if they overlap'])
                print(msg)
            for next_peak in peaks_sort:
                # i in xrange(1,len(peaks_sort)):
                # Loop through the amplitude sorted peaks
                # if the next highest amplitude peak is within trig_int of any
                # peak already in peaks then we don't want it, else, add it
                # next_peak=peaks_sort[i]
                if debug > 3:
                    print(next_peak)
                for peak in peaks:
                    add = False
                    # Use add as a switch for whether or not to append
                    # next peak to peaks, if once gone through all the peaks
                    # it is True, then we will add it, otherwise we won't!
                    if abs(next_peak[1]-peak[1]) < trig_int:
                        if debug > 3:
                            msg = ' '.join(['Difference in time is',
                                            str(next_peak[1]-peak[1]), '\n',
                                            'Which is less than',
                                            str(trig_int)])
                            print(msg)
                        add = False
                        # Need to exit the loop here if false
                        break
                    else:
                        add = True
                if add:
                    if debug > 3:
                        msg = ' '.join(['Adding peak of', str(next_peak[0]),
                                        'at sample', str(next_peak[1])])
                        print(msg)
                    peaks.append(next_peak)
                elif debug > 3:
                    msg = ' '.join(['I did not add peak of', str(next_peak[0]),
                                    'at sample', str(next_peak[1])])
                    print(msg)

        if debug >= 3:
            from eqcorrscan.utils import EQcorrscan_plotting
            _fname = ''.join(['peaks_',
                              starttime.datetime.strftime('%Y-%m-%d'),
                              '.pdf'])
            EQcorrscan_plotting.peaks_plot(image, starttime, samp_rate, True,
                                           peaks, _fname)
        peaks = sorted(peaks, key=lambda time: time[1], reverse=False)
        return peaks
    else:
        print('No peaks for you!')
        return peaks
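A minimal usage sketch of find_peaks2_short on synthetic data (the array, threshold and trigger interval below are made-up values; numpy and scipy are assumed to be installed):

import numpy as np

# synthetic detection trace: low-level noise plus two well separated spikes
arr = np.random.randn(10000) * 0.1
arr[2000] = 1.0
arr[7500] = 0.8

peaks = find_peaks2_short(arr, thresh=0.5, trig_int=100)
print(peaks)  # expected (almost always): [(1.0, 2000), (0.8, 7500)]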
コード例 #41
0
 def end_effective_time(self, value):
     if value is None:
         self._end_effective_time = None
         return
     self._end_effective_time = UTCDateTime(value)
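The setter above is one half of a property pair; a minimal sketch of the full pattern, using a hypothetical container class (only the setter body is taken from the snippet):

from obspy import UTCDateTime

class Epoch(object):
    """Hypothetical container illustrating the property/setter pattern."""

    def __init__(self, end_effective_time=None):
        self.end_effective_time = end_effective_time

    @property
    def end_effective_time(self):
        return self._end_effective_time

    @end_effective_time.setter
    def end_effective_time(self, value):
        if value is None:
            self._end_effective_time = None
            return
        self._end_effective_time = UTCDateTime(value)

epoch = Epoch("2011-01-25T15:32:12")
print(epoch.end_effective_time)  # 2011-01-25T15:32:12.000000Z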
コード例 #42
0
ファイル: findpeaks.py プロジェクト: cjhopp/scripts
def find_peaks_dep(arr, thresh, trig_int, debug=0, starttime=False,
                   samp_rate=1.0):
    r"""Function to determine peaks in an array of data above a certain \
    threshold.

    Deprecated peak-finding routine, very slow, but accurate.  If all else \
    fails this one should work.

    :type arr: ndarray
    :param arr: 1-D numpy array is required
    :type thresh: float
    :param thresh: The threshold below which data will be considered noise; \
        no peaks will be found below this value.
    :type trig_int: int
    :param trig_int: The minimum difference in samples between triggers; \
        if multiple peaks fall within this window, only the highest is kept.
    :type starttime: obspy.UTCDateTime
    :param starttime: Starttime for plotting, only used if debug > 2.
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.

    :return: peaks: Lists of tuples of peak values and locations.
    """
    import numpy as np
    from obspy import UTCDateTime
    if not starttime:
        starttime = UTCDateTime(0)
    # Perform some checks
    if trig_int < 3:
        import sys
        print('Trigger interval must be greater than 2 samples to find maxima')
        sys.exit()
    # from joblib import Parallel, delayed
    # Will find peaks in the absolute then transfer these to the true values
    sig = np.abs(arr) - thresh
    true_peaks = []
    for i in range(int(trig_int), int(len(sig) - trig_int), int(trig_int)):
        window = sig[i - trig_int: i + trig_int]
        # Define a moving window containing data from +/- the trigger interval
        peaks = []
        locs = []
        for j in range(1, len(window) - 1):
            # Find all turning points within the window
            if window[j] > 0.0 and window[j] > window[j+1] and\
               window[j] > window[j - 1]:
                peaks.append(window[j])
                locs.append(i - trig_int + j)
        # Find maximum peak in window
        if peaks:
            true_peaks.append((np.max(np.array(peaks)),
                               locs[np.argmax(np.array(peaks))]))
    # Get unique values
    peaks = sorted(list(set(true_peaks)), key=lambda loc: loc[1])
    # Find highest peak in peaks within trig_int of each other
    for i in range(1, len(peaks) - 1):
        if peaks[i + 1][1]-peaks[i][1] < trig_int:
            if peaks[i][0] < peaks[i + 1][0]:
                peaks[i] = peaks[i + 1]
            else:
                peaks[i + 1] = peaks[i]
        elif peaks[i][1] - peaks[i - 1][1] < trig_int:
            if peaks[i][0] < peaks[i - 1][0]:
                peaks[i] = peaks[i - 1]
            else:
                peaks[i - 1] = peaks[i]
    peaks = sorted(list(set(peaks)), key=lambda loc: loc[1])
    if debug >= 3:
        from eqcorrscan.utils import EQcorrscan_plotting
        _fname = ''.join(['peaks_',
                          starttime.datetime.strftime('%Y-%m-%d'),
                          '.pdf'])
        EQcorrscan_plotting.peaks_plot(arr, starttime, samp_rate, True, peaks,
                                       _fname)
    return peaks
コード例 #43
0
ファイル: scan.py プロジェクト: AntonyButcher/obspy
def main(argv=None):
    parser = ArgumentParser(prog='obspy-scan', description=__doc__.strip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Optional, the file format.\n' +
                             ' '.join(__doc__.split('\n')[-4:]))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Optional. Verbose output.')
    parser.add_argument('-n', '--non-recursive',
                        action='store_false', dest='recursive',
                        help='Optional. Do not descend into directories.')
    parser.add_argument('-i', '--ignore-links', action='store_true',
                        help='Optional. Do not follow symbolic links.')
    parser.add_argument('--start-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data after this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--end-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data before this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--id', action='append',
                        help='Optional, a SEED channel identifier '
                             "(e.g. 'GR.FUR..HHZ'). You may provide this " +
                             'option multiple times. Only these ' +
                             'channels will be plotted.')
    parser.add_argument('-t', '--event-time', default=None, type=UTCDateTime,
                        action='append',
                        help='Optional, a UTCDateTime compatible string ' +
                             "(e.g. '2010-01-01T12:00:00'). You may provide " +
                             'this option multiple times. These times get ' +
                             'marked by vertical lines in the plot. ' +
                             'Useful e.g. to mark event origin times.')
    parser.add_argument('-w', '--write', default=None,
                        help='Optional, npz file for writing data '
                             'after scanning waveform files')
    parser.add_argument('-l', '--load', default=None,
                        help='Optional, npz file for loading data '
                             'before scanning waveform files')
    parser.add_argument('--no-x', action='store_true',
                        help='Optional, Do not plot crosses.')
    parser.add_argument('--no-gaps', action='store_true',
                        help='Optional, Do not plot gaps.')
    parser.add_argument('-o', '--output', default=None,
                        help='Save plot to image file (e.g. out.pdf, ' +
                             'out.png) instead of opening a window.')
    parser.add_argument('--print-gaps', action='store_true',
                        help='Optional, prints a list of gaps at the end.')
    parser.add_argument('paths', nargs='*',
                        help='Files or directories to scan.')

    # Deprecated arguments
    action = _DeprecatedArgumentAction('--endtime', '--end-time')
    parser.add_argument('--endtime', type=UTCDateTime,
                        action=action, help=SUPPRESS)

    action = _DeprecatedArgumentAction('--event-times', '--event-time')
    parser.add_argument('--event-times', action=action, help=SUPPRESS)

    action = _DeprecatedArgumentAction('--ids', '--id')
    parser.add_argument('--ids', action=action, help=SUPPRESS)

    action = _DeprecatedArgumentAction('--nox', '--no-x',
                                       real_action='store_true')
    parser.add_argument('--nox', dest='no_x', nargs=0,
                        action=action, help=SUPPRESS)

    action = _DeprecatedArgumentAction('--nogaps', '--no-gaps',
                                       real_action='store_true')
    parser.add_argument('--nogaps', dest='no_gaps', nargs=0,
                        action=action, help=SUPPRESS)

    action = _DeprecatedArgumentAction('--starttime', '--start-time')
    parser.add_argument('--starttime', type=UTCDateTime,
                        action=action, help=SUPPRESS)

    args = parser.parse_args(argv)

    # Print help and exit if no arguments are given
    if len(args.paths) == 0 and args.load is None:
        parser.error('No paths specified.')

    # Use recursively parsing function?
    if args.recursive:
        parse_func = recursive_parse
    else:
        parse_func = parse_file_to_dict

    if args.output is not None:
        import matplotlib
        matplotlib.use("agg")
    global date2num
    from matplotlib.dates import date2num, num2date
    from matplotlib.patches import Rectangle
    from matplotlib.collections import PatchCollection
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot vertical lines if option 'event_times' was specified
    if args.event_time:
        times = map(date2num, args.event_time)
        for time in times:
            ax.axvline(time, color='k')
    # Deprecated version (don't plot twice)
    if args.event_times and not args.event_time:
        times = args.event_times.split(',')
        times = map(UTCDateTime, times)
        times = map(date2num, times)
        for time in times:
            ax.axvline(time, color='k')

    if args.start_time:
        args.start_time = date2num(args.start_time)
    elif args.starttime:
        # Deprecated version
        args.start_time = date2num(args.starttime)
    if args.end_time:
        args.end_time = date2num(args.end_time)
    elif args.endtime:
        # Deprecated version
        args.end_time = date2num(args.endtime)

    # Generate dictionary containing nested lists of start and end times per
    # station
    data = {}
    samp_int = {}
    counter = 1
    if args.load:
        load_npz(args.load, data, samp_int)
    for path in args.paths:
        counter = parse_func(data, samp_int, path, counter, args.format,
                             args.verbose, args.ignore_links)
    if not data:
        print("No waveform data found.")
        return
    if args.write:
        write_npz(args.write, data, samp_int)

    # Loop through this dictionary
    ids = list(data.keys())
    # Handle deprecated argument
    if args.ids and not args.id:
        args.id = args.ids.split(',')
    # restrict plotting of results to given ids
    if args.id:
        ids = [x for x in ids if x in args.id]
    ids = sorted(ids)[::-1]
    labels = [""] * len(ids)
    print('\n')
    for _i, _id in enumerate(ids):
        labels[_i] = ids[_i]
        data[_id].sort()
        startend = np.array(data[_id])
        if len(startend) == 0:
            continue
        # restrict plotting of results to given start/end time
        if args.start_time:
            startend = startend[startend[:, 1] > args.start_time]
        if len(startend) == 0:
            continue
        if args.end_time:
            startend = startend[startend[:, 0] < args.end_time]
        if len(startend) == 0:
            continue
        timerange = startend[:, 1].max() - startend[:, 0].min()
        if timerange == 0.0:
            warnings.warn('Zero sample long data for _id=%s, skipping' % _id)
            continue

        startend_compressed = compressStartend(startend, 1000)

        offset = np.ones(len(startend)) * _i  # generate list of y values
        ax.xaxis_date()
        if not args.no_x:
            ax.plot_date(startend[:, 0], offset, 'x', linewidth=2)
        ax.hlines(offset[:len(startend_compressed)], startend_compressed[:, 0],
                  startend_compressed[:, 1], 'b', linewidth=2, zorder=3)
        # find the gaps
        diffs = startend[1:, 0] - startend[:-1, 1]  # current.start - last.end
        gapsum = diffs[diffs > 0].sum()
        perc = (timerange - gapsum) / timerange
        labels[_i] = labels[_i] + "\n%.1f%%" % (perc * 100)
        gap_indices = diffs > 1.8 * np.array(samp_int[_id][:-1])
        gap_indices = np.concatenate((gap_indices, [False]))
        if any(gap_indices):
            # don't handle last end time as start of gap
            gaps_start = startend[gap_indices, 1]
            gaps_end = startend[np.roll(gap_indices, 1), 0]
            if not args.no_gaps and any(gap_indices):
                rects = [Rectangle((start_, offset[0] - 0.4),
                                   end_ - start_, 0.8)
                         for start_, end_ in zip(gaps_start, gaps_end)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps:
                for start_, end_ in zip(gaps_start, gaps_end):
                    start_, end_ = num2date((start_, end_))
                    start_ = UTCDateTime(start_.isoformat())
                    end_ = UTCDateTime(end_.isoformat())
                    print("%s %s %s %.3f" % (_id, start_, end_, end_ - start_))

    # Pretty format the plot
    ax.set_ylim(0 - 0.5, _i + 0.5)
    ax.set_yticks(np.arange(_i + 1))
    ax.set_yticklabels(labels, family="monospace", ha="right")
    # set x-axis limits according to given start/end time
    if args.start_time:
        ax.set_xlim(left=args.start_time, auto=None)
    if args.end_time:
        ax.set_xlim(right=args.end_time, auto=None)
    fig.autofmt_xdate()  # rotate date
    plt.subplots_adjust(left=0.2)
    if args.output is None:
        plt.show()
    else:
        fig.set_dpi(72)
        height = len(ids) * 0.5
        height = max(4, height)
        fig.set_figheight(height)
        # tight_layout() only available from matplotlib >= 1.1
        try:
            plt.tight_layout()
            days = ax.get_xlim()
            days = days[1] - days[0]
            width = max(6, days / 30.)
            width = min(width, height * 4)
            fig.set_figwidth(width)
            plt.subplots_adjust(top=1, bottom=0, left=0, right=1)
            plt.tight_layout()
        except:
            pass
        fig.savefig(args.output)
    sys.stdout.write('\n')
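Because main() takes an explicit argument list and hands it to parser.parse_args(), the scanner can be driven from Python as well as from the obspy-scan entry point. A minimal sketch (the path and times below are placeholders):

main(argv=['--start-time', '2010-01-01T00:00:00',
           '--end-time', '2010-01-02T00:00:00',
           '--output', 'scan.png',
           '/path/to/waveform/files'])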
コード例 #44
0
ファイル: test_utcdatetime.py プロジェクト: avuan/obspy
 def test_utcnow(self):
     """
     Test utcnow class method of UTCDateTime class.
     """
     dt = UTCDateTime()
     self.assertTrue(UTCDateTime.utcnow() >= dt)
コード例 #45
0
ファイル: scan.py プロジェクト: junlysky/obspy
    def analyze_parsed_data(self, print_gaps=False, starttime=None,
                            endtime=None, seed_ids=None):
        """
        Prepare information for plotting.

        Information is stored in a dictionary as ``scanner._info``, only
        containing these data matching the given parameters.

        :type print_gaps: bool
        :param print_gaps: Whether to print information on all encountered gaps
            and overlaps.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Whether to use a fixed start time for the plot and
            data percentage calculation.
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: Whether to use a fixed end time for the plot and
            data percentage calculation.
        :type seed_ids: list of str
        :param seed_ids: Whether to consider only a specific set of SEED IDs
            (e.g. ``seed_ids=["GR.FUR..BHZ", "GR.WET..BHZ"]``) or just all SEED
            IDs encountered in data (if left ``None``).
        """
        data = self.data
        samp_int = self.samp_int
        if starttime is not None:
            starttime = starttime.matplotlib_date
        if endtime is not None:
            endtime = endtime.matplotlib_date
        # either use ids specified by user or use ids based on what data we
        # have parsed
        ids = seed_ids or list(data.keys())
        ids = sorted(ids)[::-1]
        if self.verbose:
            print('\n')
        self._info = {}
        for _i, _id in enumerate(ids):
            info = {"gaps": [], "overlaps": [], "data_starts": [],
                    "data_startends_compressed": [], "percentage": None}
            self._info[_id] = info
            gap_info = info["gaps"]
            overlap_info = info["overlaps"]
            # sort data list and sampling rate list
            if _id in data:
                startend = np.array(data[_id])
                _samp_int = np.array(samp_int[_id])
                indices = np.lexsort((startend[:, 1], startend[:, 0]))
                startend = startend[indices]
                _samp_int = _samp_int[indices]
            else:
                startend = np.array([])
                _samp_int = np.array([])
            if len(startend) == 0:
                if not (starttime and endtime):
                    continue
                gap_info.append((starttime, endtime))
                if print_gaps:
                    print("%s %s %s %.3f" % (
                        _id, starttime, endtime, endtime - starttime))
                continue
            # restrict plotting of results to given start/end time
            if starttime or endtime:
                indices = np.ones(len(startend), dtype=np.bool_)
                if starttime:
                    indices &= startend[:, 1] > starttime
                if endtime:
                    indices &= startend[:, 0] < endtime
                startend = startend[indices]
                _samp_int = _samp_int[indices]
            if len(startend) == 0:
                # if both start and endtime are given, add it to gap info
                if starttime and endtime:
                    gap_info.append((starttime, endtime))
                continue
            data_start = startend[:, 0].min()
            data_end = startend[:, 1].max()
            timerange_start = starttime or data_start
            timerange_end = endtime or data_end
            timerange = timerange_end - timerange_start
            if timerange == 0.0:
                msg = 'Zero sample long data for _id=%s, skipping' % _id
                warnings.warn(msg)
                continue

            startend_compressed = compress_start_end(startend.copy(), 1000,
                                                     merge_overlaps=True)

            info["data_starts"] = startend[:, 0]
            info["data_startends_compressed"] = startend_compressed

            # find the gaps
            # current.start - last.end
            diffs = startend[1:, 0] - startend[:-1, 1]
            gapsum = diffs[diffs > 0].sum()
            # if start- and/or endtime is specified, add missing data at
            # start/end to gap sum
            has_gap = False
            gap_at_start = (
                starttime and
                data_start > starttime and
                data_start - starttime)
            gap_at_end = (
                endtime and
                endtime > data_end and
                endtime - data_end)
            if gap_at_start:
                gapsum += gap_at_start
                has_gap = True
            if gap_at_end:
                gapsum += gap_at_end
                has_gap = True
            info["percentage"] = (timerange - gapsum) / timerange * 100
            # define a gap as over 0.8 delta after expected sample time
            gap_indices = diffs > 0.8 * _samp_int[:-1]
            gap_indices = np.append(gap_indices, False)
            # define an overlap as over 0.8 delta before expected sample time
            overlap_indices = diffs < -0.8 * _samp_int[:-1]
            overlap_indices = np.append(overlap_indices, False)
            has_gap |= any(gap_indices)
            has_gap |= any(overlap_indices)
            if has_gap:
                # don't handle last end time as start of gap
                gaps_start = startend[gap_indices, 1]
                gaps_end = startend[np.roll(gap_indices, 1), 0]
                overlaps_end = startend[overlap_indices, 1]
                overlaps_start = startend[np.roll(overlap_indices, 1), 0]
                # but now, manually add start/end for gaps at start/end of user
                # specified start/end times
                if gap_at_start:
                    gaps_start = np.append(gaps_start, starttime)
                    gaps_end = np.append(gaps_end, data_start)
                if gap_at_end:
                    gaps_start = np.append(gaps_start, data_end)
                    gaps_end = np.append(gaps_end, endtime)

                _starts = np.concatenate((gaps_start, overlaps_end))
                _ends = np.concatenate((gaps_end, overlaps_start))
                sort_order = np.argsort(_starts)
                _starts = _starts[sort_order]
                _ends = _ends[sort_order]
                for start_, end_ in zip(_starts, _ends):
                    if print_gaps:
                        start__, end__ = num2date((start_, end_))
                        start__ = UTCDateTime(start__.isoformat())
                        end__ = UTCDateTime(end__.isoformat())
                        print("{} {} {} {:.3f}".format(
                            _id, start__, end__, end__ - start__))
                    if start_ < end_:
                        gap_info.append((start_, end_))
                    else:
                        overlap_info.append((start_, end_))
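The gap/overlap test above boils down to comparing consecutive segment boundaries against 0.8 times the sampling interval. A standalone sketch of just that criterion (segment times and sampling interval below are made up):

import numpy as np

# start/end times (s) of three segments and their sample spacing
startend = np.array([[0.0, 10.0], [10.0, 20.0], [25.0, 30.0]])
samp_int = np.array([0.01, 0.01, 0.01])

diffs = startend[1:, 0] - startend[:-1, 1]      # current.start - last.end
gap_indices = diffs > 0.8 * samp_int[:-1]       # late by more than 0.8 delta
overlap_indices = diffs < -0.8 * samp_int[:-1]  # early by more than 0.8 delta
print(gap_indices)      # [False  True]
print(overlap_indices)  # [False False]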
コード例 #46
0
ファイル: par_download.py プロジェクト: echolite/ANTS
def par_download():
    
    """
    
    Parallel download from IRIS DMC
    
    """

    #==============================================================================================
    # preliminaries
    #==============================================================================================
    
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size=comm.Get_size()
    
    #==============================================================================================
    #- MASTER process:
    #- reads in xmlinput
    #- creates output directory
    #- creates a list of input files
    #==============================================================================================
    
    if rank==0:

        datadir=cfg.datadir
        dat=rxml.read_xml(os.path.join(cfg.inpdir,'input_download.xml'))[1]

        # network, channel, location and station list
        stalist=os.path.join(cfg.inpdir,'downloadlist.txt')
        fh=open(stalist,'r')
        ids=fh.read().split('\n')
       
    #==============================================================================================
    #- All processes:
    #- receive the input; and the list of files
    #- read variables from broadcasted input
    #==============================================================================================
    
    else:
        ids=list()
        dat=list()    
       
    ids=comm.bcast(ids, root=0)
    dat=comm.bcast(dat, root=0)
    
    datadir=cfg.datadir
    targetloc=datadir+'raw/latest/rank'+str(rank)+'/'
    
    if os.path.isdir(targetloc)==False:
        cmd='mkdir '+targetloc
        os.system(cmd)
    
    
    # Directory where executable is located
    exdir=dat['exdir']
    
    # Verbose?
    if dat['verbose']=='1':
        v=True
        vfetchdata='-v '
    else:
        vfetchdata=''
        
    # Quality?
    quality = dat['quality']
        
    # time interval of request
    t1=dat['time']['starttime']
    t1str=UTCDateTime(t1).strftime('%Y.%j.%H.%M.%S')
    t2=dat['time']['endtime']
    t2str=UTCDateTime(t2).strftime('%Y.%j.%H.%M.%S')
 
    # data segment length
    if dat['time']['len']==None:
        winlen=UTCDateTime(t2)-UTCDateTime(t1)
    else:
        winlen = int(dat['time']['len'])
    
    # minimum length
    minlen=dat['time']['minlen']

    # geographical region
    lat_min=dat['region']['lat_min']
    lat_max=dat['region']['lat_max']
    lon_min=dat['region']['lon_min']
    lon_max=dat['region']['lon_max']
    
    #==============================================================================================
    #- Assign each rank its own chunk of input
    #==============================================================================================

    clen=int(float(len(ids))/float(size))
    chunk=(rank*clen, (rank+1)*clen)
    myids=ids[chunk[0]:chunk[1]]
    if rank==size-1:
        myids=ids[chunk[0]:]
    
    #==================================================================================
    # Input files loop
    #==================================================================================
      
    for id in myids:
        
        if id=='': continue
        
        t = UTCDateTime(t1)
        while t < UTCDateTime(t2):
            
            tstart = UTCDateTime(t).strftime('%Y-%m-%d,%H:%M:%S')
            tstartstr = UTCDateTime(t).strftime('%Y.%j.%H.%M.%S')
            
            tstep = min((UTCDateTime(t)+winlen),UTCDateTime(t2)).\
            strftime('%Y-%m-%d,%H:%M:%S')
            tstepstr = min((UTCDateTime(t)+winlen),UTCDateTime(t2)).\
            strftime('%Y.%j.%H.%M.%S')
            
            
            #-Formulate a polite request
            filename=targetloc+id+'.'+tstartstr+'.'+tstepstr+'.mseed'
            if os.path.exists(filename)==False:
                network=id.split('.')[0]
                station=id.split('.')[1]
                channel=id.split('.')[3]
                #print network, station, location, channel
                print('\n Rank '+str(rank)+'\n',file=None)
                print('\n Attempting to download data from: '+id+'\n',file=None)
                print(filename)
                
                reqstring=exdir+'/FetchData '+vfetchdata+' -N '+network+ \
                 ' -S '+station+' -C '+channel+' -s '+tstart+' -e '+tstep+ \
                 ' -msl '+minlen+' --lat '+lat_min+':'+lat_max+ \
                ' --lon '+lon_min+':'+lon_max+' -o '+filename+' -Q '+quality
                  
                os.system(reqstring)
            t += winlen
          
     
    # Clean up (some files come back with 0 data)
    stafile=dat['ids']
    t1s=t1str.split('.')[0]+'.'+t1str.split('.')[1]
    t2s=t2str.split('.')[0]+'.'+t2str.split('.')[1]
    
    cmd=('./UTIL/cleandir.sh '+targetloc)     
    os.system(cmd)
    os.system('mv '+targetloc+'* '+targetloc+'/..')
    os.system('rmdir '+targetloc)
    
    
    # Download resp files for all epochs!
    respfileloc=datadir+'resp/'
        
    if os.path.isdir(respfileloc)==False:
        cmd='mkdir '+respfileloc
        os.system(cmd)
    
    
    for id in myids:
        if id=='': continue
        
        network=id.split('.')[0]
        station=id.split('.')[1]
        channel=id.split('.')[3]
        
        print('\n Downloading response information from: '+id+'\n')
        reqstring=exdir+'/FetchData '+vfetchdata+' -N '+network+ ' -S '+station+' -C '+channel+\
        ' --lat '+lat_min+':'+lat_max+' --lon '+lon_min+':'+lon_max+' -rd '+respfileloc
        os.system(reqstring)
        
    comm.Barrier()

    
    if rank==0:
        outfile=os.path.join(cfg.datadir,'raw/latest/download_report.txt')
        outf=open(outfile,'w')
        
        print('Attempting to download data from stations: \n',file=outf)
        print('****************************************** \n',file=outf)
        for id in ids:
            print(id,file=outf)
        print('****************************************** \n',file=outf)
        stalist=os.path.join(cfg.inpdir,'downloadlist.txt')
        fh=open(stalist,'r')
        ids=fh.read().split('\n')
        
        noreturn=[]
        
        for id in ids:
            if id=='': continue
            fls=glob(os.path.join(cfg.datadir,'raw/latest',id+'*'))
            if fls != []:
                print('Files downloaded for id: '+id,file=outf)
                print('First file: '+fls[0],file=outf)
                print('Last file: '+fls[-1],file=outf)
                print('****************************************** \n',file=outf)    
            else: 
                noreturn.append(id)
            
        if noreturn != []:
            print('NO files downloaded for: \n',file=outf)
            
            print(noreturn,file=outf)
     
        print('****************************************** \n',file=outf)
        print('Download parameters were: \n',file=outf)
        print('****************************************** \n',file=outf)
        outf.close()
        
        os.system('cat '+cfg.inpdir+'/input_download.xml >> '+outfile)
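The per-rank work assignment above (clen, chunk, myids) splits the station ID list into equal slices, with the remainder going to the last rank. A standalone sketch of that logic without MPI (the ID list is made up):

def split_ids(ids, size):
    # equal-length chunks; the last rank also takes the remainder
    clen = int(float(len(ids)) / float(size))
    chunks = []
    for rank in range(size):
        if rank == size - 1:
            chunks.append(ids[rank * clen:])
        else:
            chunks.append(ids[rank * clen:(rank + 1) * clen])
    return chunks

print(split_ids(['A', 'B', 'C', 'D', 'E', 'F', 'G'], 3))
# [['A', 'B'], ['C', 'D'], ['E', 'F', 'G']]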
        
コード例 #47
0
ファイル: scan.py プロジェクト: Keita1/obspy
def main(argv=None):
    parser = ArgumentParser(prog='obspy-scan', description=__doc__.strip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                        help='Optional, the file format.\n' +
                             ' '.join(__doc__.split('\n')[-4:]))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Optional. Verbose output.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Optional. Be quiet. Overwritten by --verbose '
                             'flag.')
    parser.add_argument('-n', '--non-recursive',
                        action='store_false', dest='recursive',
                        help='Optional. Do not descend into directories.')
    parser.add_argument('-i', '--ignore-links', action='store_true',
                        help='Optional. Do not follow symbolic links.')
    parser.add_argument('--start-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data after this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--end-time', default=None, type=UTCDateTime,
                        help='Optional, a UTCDateTime compatible string. ' +
                             'Only visualize data before this time and set ' +
                             'time-axis axis accordingly.')
    parser.add_argument('--id', action='append',
                        help='Optional, a SEED channel identifier '
                             "(e.g. 'GR.FUR..HHZ'). You may provide this " +
                             'option multiple times. Only these ' +
                             'channels will be plotted.')
    parser.add_argument('-t', '--event-time', default=None, type=UTCDateTime,
                        action='append',
                        help='Optional, a UTCDateTime compatible string ' +
                             "(e.g. '2010-01-01T12:00:00'). You may provide " +
                             'this option multiple times. These times get ' +
                             'marked by vertical lines in the plot. ' +
                             'Useful e.g. to mark event origin times.')
    parser.add_argument('-w', '--write', default=None,
                        help='Optional, npz file for writing data '
                             'after scanning waveform files')
    parser.add_argument('-l', '--load', default=None,
                        help='Optional, npz file for loading data '
                             'before scanning waveform files')
    parser.add_argument('--no-x', action='store_true',
                        help='Optional, Do not plot crosses.')
    parser.add_argument('--no-gaps', action='store_true',
                        help='Optional, Do not plot gaps.')
    parser.add_argument('-o', '--output', default=None,
                        help='Save plot to image file (e.g. out.pdf, ' +
                             'out.png) instead of opening a window.')
    parser.add_argument('--print-gaps', action='store_true',
                        help='Optional, prints a list of gaps at the end.')
    parser.add_argument('paths', nargs='*',
                        help='Files or directories to scan.')

    args = parser.parse_args(argv)

    if args.output is not None:
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    # Print help and exit if no arguments are given
    if len(args.paths) == 0 and args.load is None:
        parser.error('No paths specified.')

    # Use recursively parsing function?
    if args.recursive:
        parse_func = recursive_parse
    else:
        parse_func = parse_file_to_dict

    from matplotlib.dates import date2num, num2date
    from matplotlib.ticker import FuncFormatter
    from matplotlib.patches import Rectangle
    from matplotlib.collections import PatchCollection
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot vertical lines if option 'event_time' was specified
    if args.event_time:
        times = [date2num(t.datetime) for t in args.event_time]
        for time in times:
            ax.axvline(time, color='k')

    if args.start_time:
        args.start_time = date2num(args.start_time.datetime)
    if args.end_time:
        args.end_time = date2num(args.end_time.datetime)

    # Generate dictionary containing nested lists of start and end times per
    # station
    data = {}
    samp_int = {}
    counter = 1
    if args.load:
        load_npz(args.load, data, samp_int)
    for path in args.paths:
        counter = parse_func(data, samp_int, path, counter, args.format,
                             verbose=args.verbose, quiet=args.quiet,
                             ignore_links=args.ignore_links)
    if not data:
        if args.verbose or not args.quiet:
            print("No waveform data found.")
        return
    if args.write:
        write_npz(args.write, data, samp_int)

    # either use ids specified by user or use ids based on what data we have
    # parsed
    ids = args.id or list(data.keys())
    ids = sorted(ids)[::-1]
    labels = [""] * len(ids)
    if args.verbose or not args.quiet:
        print('\n')
    for _i, _id in enumerate(ids):
        labels[_i] = ids[_i]
        # sort data list and sampling rate list
        if _id in data:
            startend = np.array(data[_id])
            _samp_int = np.array(samp_int[_id])
            indices = np.lexsort((startend[:, 1], startend[:, 0]))
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        else:
            startend = np.array([])
            _samp_int = np.array([])
        if len(startend) == 0:
            if not (args.start_time and args.end_time):
                continue
            if not args.no_gaps:
                rects = [Rectangle((args.start_time, _i - 0.4),
                                   args.end_time - args.start_time, 0.8)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps and (args.verbose or not args.quiet):
                print("%s %s %s %.3f" % (
                    _id, args.start_time, args.end_time,
                    args.end_time - args.start_time))
            continue
        # restrict plotting of results to given start/end time
        if args.start_time:
            indices = startend[:, 1] > args.start_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        if len(startend) == 0:
            continue
        if args.end_time:
            indices = startend[:, 0] < args.end_time
            startend = startend[indices]
            _samp_int = _samp_int[indices]
        if len(startend) == 0:
            continue
        data_start = startend[:, 0].min()
        data_end = startend[:, 1].max()
        timerange_start = args.start_time or data_start
        timerange_end = args.end_time or data_end
        timerange = timerange_end - timerange_start
        if timerange == 0.0:
            warnings.warn('Zero sample long data for _id=%s, skipping' % _id)
            continue

        startend_compressed = compress_start_end(startend, 1000)

        offset = np.ones(len(startend)) * _i  # generate list of y values
        if not args.no_x:
            ax.plot(startend[:, 0], offset, 'x', linewidth=2)
        ax.hlines(offset[:len(startend_compressed)], startend_compressed[:, 0],
                  startend_compressed[:, 1], 'b', linewidth=2, zorder=3)
        # find the gaps
        diffs = startend[1:, 0] - startend[:-1, 1]  # current.start - last.end
        gapsum = diffs[diffs > 0].sum()
        # if start- and/or endtime is specified, add missing data at start/end
        # to gap sum
        has_gap = False
        gap_at_start = (
            args.start_time and
            data_start > args.start_time and
            data_start - args.start_time)
        gap_at_end = (
            args.end_time and
            args.end_time > data_end and
            args.end_time - data_end)
        if args.start_time and gap_at_start:
            gapsum += gap_at_start
            has_gap = True
        if args.end_time and gap_at_end:
            gapsum += gap_at_end
            has_gap = True
        perc = (timerange - gapsum) / timerange
        labels[_i] = labels[_i] + "\n%.1f%%" % (perc * 100)
        gap_indices = diffs > 1.8 * _samp_int[:-1]
        gap_indices = np.append(gap_indices, False)
        has_gap |= any(gap_indices)
        if has_gap:
            # don't handle last end time as start of gap
            gaps_start = startend[gap_indices, 1]
            gaps_end = startend[np.roll(gap_indices, 1), 0]
            if args.start_time and gap_at_start:
                gaps_start = np.append(gaps_start, args.start_time)
                gaps_end = np.append(gaps_end, data_start)
            if args.end_time and gap_at_end:
                gaps_start = np.append(gaps_start, data_end)
                gaps_end = np.append(gaps_end, args.end_time)
            if not args.no_gaps:
                rects = [Rectangle((start_, offset[0] - 0.4), end_ - start_,
                                   0.8)
                         for start_, end_ in zip(gaps_start, gaps_end)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if args.print_gaps:
                for start_, end_ in zip(gaps_start, gaps_end):
                    start_, end_ = num2date((start_, end_))
                    start_ = UTCDateTime(start_.isoformat())
                    end_ = UTCDateTime(end_.isoformat())
                    if args.verbose or not args.quiet:
                        print("%s %s %s %.3f" % (_id, start_, end_,
                                                 end_ - start_))

    # Pretty format the plot
    ax.set_ylim(0 - 0.5, len(ids) - 0.5)
    ax.set_yticks(np.arange(len(ids)))
    ax.set_yticklabels(labels, family="monospace", ha="right")
    fig.autofmt_xdate()  # rotate date
    ax.xaxis_date()
    # set custom formatters to always show date in first tick
    formatter = ObsPyAutoDateFormatter(ax.xaxis.get_major_locator())
    formatter.scaled[1 / 24.] = \
        FuncFormatter(decimal_seconds_format_date_first_tick)
    formatter.scaled.pop(1/(24.*60.))
    ax.xaxis.set_major_formatter(formatter)
    plt.subplots_adjust(left=0.2)
    # set x-axis limits according to given start/end time
    if args.start_time and args.end_time:
        ax.set_xlim(left=args.start_time, right=args.end_time)
    elif args.start_time:
        ax.set_xlim(left=args.start_time, auto=None)
    elif args.end_time:
        ax.set_xlim(right=args.end_time, auto=None)
    else:
        left, right = ax.xaxis.get_data_interval()
        x_axis_range = right - left
        ax.set_xlim(left - 0.05 * x_axis_range, right + 0.05 * x_axis_range)
    if args.output is None:
        plt.show()
    else:
        fig.set_dpi(72)
        height = len(ids) * 0.5
        height = max(4, height)
        fig.set_figheight(height)
        plt.tight_layout()

        if not args.start_time or not args.end_time:
            days = ax.get_xlim()
            days = days[1] - days[0]
        else:
            days = args.end_time - args.start_time

        width = max(6, days / 30.)
        width = min(width, height * 4)
        fig.set_figwidth(width)
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1)
        plt.tight_layout()

        fig.savefig(args.output)
    if args.verbose and not args.quiet:
        sys.stdout.write('\n')
コード例 #48
0
ファイル: test_fields.py プロジェクト: shineusn/obspy
 def test_readDateTime(self):
     field = VariableString(1, "test", 1, 22, 'T', strict=True)
     #1
     orig = b'1992,002,00:00:00.0000~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1992, 1, 2))
     self.assertEqual(field.write(dt), b'1992,002~')
     #1
     orig = b'1992,002~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1992, 1, 2))
     self.assertEqual(field.write(dt), b'1992,002~')
     #2
     orig = b'1992,005,01:02:03.4567~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1992, 1, 5, 1, 2, 3, 456700))
     self.assertEqual(field.write(dt), orig)
     #3
     orig = b'1992,005,01:02:03.0001~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1992, 1, 5, 1, 2, 3, 100))
     self.assertEqual(field.write(dt), orig)
     #4
     orig = b'1992,005,01:02:03.1000~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1992, 1, 5, 1, 2, 3, 100000))
     self.assertEqual(field.write(dt), orig)
     #5
     orig = b'1987,023,04:23:05.1~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1987, 1, 23, 4, 23, 5, 100000))
     self.assertEqual(field.write(dt), b'1987,023,04:23:05.1000~')
     #6
     orig = b'1987,023,04:23:05.123~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1987, 1, 23, 4, 23, 5, 123000))
     self.assertEqual(field.write(dt), b'1987,023,04:23:05.1230~')
     #
     orig = b'2008,358,01:30:22.0987~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0o1, 30, 22, 98700))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358,01:30:22.9876~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0o1, 30, 22, 987600))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358,01:30:22.0005~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0o1, 30, 22, 500))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358,01:30:22.0000~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0o1, 30, 22, 0))
     self.assertEqual(field.write(dt), orig)
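The 'YYYY,DDD,HH:MM:SS.ffff' strings exercised above use day-of-year notation, which maps onto UTCDateTime's julday keyword. A small sketch of the equivalence, assuming obspy is installed:

from obspy import UTCDateTime

# day 358 of 2008 is December 23rd, matching the assertions above
dt = UTCDateTime(year=2008, julday=358, hour=1, minute=30, second=22)
print(dt)         # 2008-12-23T01:30:22.000000Z
print(dt.julday)  # 358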
コード例 #49
0
ファイル: scan.py プロジェクト: Ciack404/obspy
def main(option_list=None):
    parser = OptionParser(__doc__.strip())
    parser.add_option("-f", "--format", default=None,
                      type="string", dest="format",
                      help="Optional, the file format.\n" +
                      " ".join(__doc__.split('\n')[-4:]))
    parser.add_option("-v", "--verbose", default=False,
                      action="store_true", dest="verbose",
                      help="Optional. Verbose output.")
    parser.add_option("-n", "--non-recursive", default=True,
                      action="store_false", dest="recursive",
                      help="Optional. Do not descend into directories.")
    parser.add_option("-i", "--ignore-links", default=False,
                      action="store_true", dest="ignore_links",
                      help="Optional. Do not follow symbolic links.")
    parser.add_option("--starttime", default=None,
                      type="string", dest="starttime",
                      help="Optional, a UTCDateTime compatible string. " +
                      "Only visualize data after this time and set " +
                      "time-axis axis accordingly.")
    parser.add_option("--endtime", default=None,
                      type="string", dest="endtime",
                      help="Optional, a UTCDateTime compatible string. " +
                      "Only visualize data after this time and set " +
                      "time-axis axis accordingly.")
    parser.add_option("--ids", default=None,
                      type="string", dest="ids",
                      help="Optional, a list of SEED channel identifiers " +
                      "separated by commas " +
                      "(e.g. 'GR.FUR..HHZ,BW.MANZ..EHN'. Only these " +
                      "channels will not be plotted.")
    parser.add_option("-t", "--event-times", default=None,
                      type="string", dest="event_times",
                      help="Optional, a list of UTCDateTime compatible " +
                      "strings separated by commas " +
                      "(e.g. '2010-01-01T12:00:00,2010-01-01T13:00:00'). " +
                      "These get marked by vertical lines in the plot. " +
                      "Useful e.g. to mark event origin times.")
    parser.add_option("-w", "--write", default=None,
                      type="string", dest="write",
                      help="Optional, npz file for writing data "
                      "after scanning waveform files")
    parser.add_option("-l", "--load", default=None,
                      type="string", dest="load",
                      help="Optional, npz file for loading data "
                      "before scanning waveform files")
    parser.add_option("--nox", default=False,
                      action="store_true", dest="nox",
                      help="Optional, Do not plot crosses.")
    parser.add_option("--nogaps", default=False,
                      action="store_true", dest="nogaps",
                      help="Optional, Do not plot gaps.")
    parser.add_option("-o", "--output", default=None,
                      type="string", dest="output",
                      help="Save plot to image file (e.g. out.pdf, " +
                      "out.png) instead of opening a window.")
    parser.add_option("--print-gaps", default=False,
                      action="store_true", dest="print_gaps",
                      help="Optional, prints a list of gaps at the end.")
    (options, largs) = parser.parse_args(option_list)

    # Print help and exit if no arguments are given
    if len(largs) == 0 and options.load is None:
        parser.print_help()
        sys.exit(1)

    # Use recursively parsing function?
    if options.recursive:
        parse_func = recursive_parse
    else:
        parse_func = parse_file_to_dict

    if options.output is not None:
        import matplotlib
        matplotlib.use("agg")
    global date2num
    from matplotlib.dates import date2num, num2date
    from matplotlib.patches import Rectangle
    from matplotlib.collections import PatchCollection
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot vertical lines if option 'event_times' was specified
    if options.event_times:
        times = options.event_times.split(',')
        times = map(UTCDateTime, times)
        times = map(date2num, times)
        for time in times:
            ax.axvline(time, color='k')

    if options.starttime:
        options.starttime = UTCDateTime(options.starttime)
        options.starttime = date2num(options.starttime)
    if options.endtime:
        options.endtime = UTCDateTime(options.endtime)
        options.endtime = date2num(options.endtime)

    # Generate dictionary containing nested lists of start and end times per
    # station
    data = {}
    samp_int = {}
    counter = 1
    if options.load:
        load_npz(options.load, data, samp_int)
    for path in largs:
        counter = parse_func(data, samp_int, path, counter, options.format,
                             options.verbose, options.ignore_links)
    if not data:
        print("No waveform data found.")
        return
    if options.write:
        write_npz(options.write, data, samp_int)

    # Loop through this dictionary
    ids = data.keys()
    # restrict plotting of results to given ids
    if options.ids:
        options.ids = options.ids.split(',')
        ids = filter(lambda x: x in options.ids, ids)
    ids = sorted(ids)[::-1]
    labels = [""] * len(ids)
    print()
    for _i, _id in enumerate(ids):
        labels[_i] = ids[_i]
        data[_id].sort()
        startend = np.array(data[_id])
        if len(startend) == 0:
            continue
        # restrict plotting of results to given start/endtime
        if options.starttime:
            startend = startend[startend[:, 1] > options.starttime]
        if len(startend) == 0:
            continue
        if options.endtime:
            startend = startend[startend[:, 0] < options.endtime]
        if len(startend) == 0:
            continue
        if _id not in samp_int:
            warnings.warn('Problem with _id=%s, skipping' % _id)
            continue

        startend_compressed = compressStartend(startend, 1000)

        offset = np.ones(len(startend)) * _i  # generate list of y values
        ax.xaxis_date()
        if not options.nox:
            ax.plot_date(startend[:, 0], offset, 'x', linewidth=2)
        ax.hlines(offset[:len(startend_compressed)], startend_compressed[:, 0],
                  startend_compressed[:, 1], 'b', linewidth=2, zorder=3)
        # find the gaps
        diffs = startend[1:, 0] - startend[:-1, 1]  # current.start - last.end
        gapsum = diffs[diffs > 0].sum()
        timerange = startend[:, 1].max() - startend[:, 0].min()
        perc = (timerange - gapsum) / timerange
        labels[_i] = labels[_i] + "\n%.1f%%" % (perc * 100)
        gap_indices = diffs > 1.8 * samp_int[_id]
        gap_indices = np.concatenate((gap_indices, [False]))
        if any(gap_indices):
            # don't handle the last endtime as the start of a gap
            gaps_start = startend[gap_indices, 1]
            gaps_end = startend[np.roll(gap_indices, 1), 0]
            if not options.nogaps and any(gap_indices):
                rects = [Rectangle((start_, offset[0] - 0.4),
                                   end_ - start_, 0.8)
                         for start_, end_ in zip(gaps_start, gaps_end)]
                ax.add_collection(PatchCollection(rects, color="r"))
            if options.print_gaps:
                for start_, end_ in zip(gaps_start, gaps_end):
                    start_, end_ = num2date((start_, end_))
                    start_ = UTCDateTime(start_.isoformat())
                    end_ = UTCDateTime(end_.isoformat())
                    print "%s %s %s %.3f" % (_id, start_, end_, end_ - start_)

    # Pretty format the plot
    ax.set_ylim(0 - 0.5, _i + 0.5)
    ax.set_yticks(np.arange(_i + 1))
    ax.set_yticklabels(labels, family="monospace", ha="right")
    # set x-axis limits according to given start/endtime
    if options.starttime:
        ax.set_xlim(left=options.starttime, auto=None)
    if options.endtime:
        ax.set_xlim(right=options.endtime, auto=None)
    fig.autofmt_xdate()  # rotate date
    plt.subplots_adjust(left=0.2)
    if options.output is None:
        plt.show()
    else:
        fig.set_dpi(72)
        height = len(ids) * 0.5
        height = max(4, height)
        fig.set_figheight(height)
        # tight_layout() only available from matplotlib >= 1.1
        try:
            plt.tight_layout()
            days = ax.get_xlim()
            days = days[1] - days[0]
            width = max(6, days / 30.)
            width = min(width, height * 4)
            fig.set_figwidth(width)
            plt.subplots_adjust(top=1, bottom=0, left=0, right=1)
            plt.tight_layout()
        except:
            pass
        fig.savefig(options.output)
    sys.stdout.write('\n')
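A hedged usage sketch (editorial addition): main() accepts an explicit option list, so the scanner can also be driven programmatically. The directory and output file name below are placeholders.

# Editorial sketch, not part of scan.py: driving main() with an option list.
main(option_list=['-f', 'MSEED',         # skip file-format autodetection
                  '--print-gaps',        # also list gaps on stdout
                  '-o', 'coverage.png',  # save the plot instead of showing it
                  'waveforms/'])         # directory scanned recursively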
コード例 #50
0
ファイル: test_fields.py プロジェクト: shineusn/obspy
 def test_readCompactDateTime(self):
     field = VariableString(1,
                            "test",
                            0,
                            22,
                            'T',
                            strict=True,
                            compact=True)
     #1
     orig = b'1992,002~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(1992, 1, 2))
     self.assertEqual(field.write(dt), orig)
     #2
     orig = b'2007,199~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2007, 7, 18))
     self.assertEqual(field.write(dt), orig)
     #3 - wrong syntax
     orig = b'1992'
     self.assertRaises(Exception, field.read, compatibility.BytesIO(orig))
     orig = b'1992,'
     self.assertRaises(Exception, field.read, compatibility.BytesIO(orig))
     orig = b'1992~'
     self.assertRaises(Exception, field.read, compatibility.BytesIO(orig))
     orig = b'1992,~'
     self.assertRaises(Exception, field.read, compatibility.BytesIO(orig))
     #5 - empty datetime
     orig = b'~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, '')
     self.assertEqual(field.write(dt), b'~')
     #6 - bad syntax
     orig = b''
     self.assertRaises(Exception, field.read, compatibility.BytesIO(orig))
     self.assertEqual(field.write(dt), b'~')
     #7
     orig = b'2007,199~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2007, 7, 18))
     self.assertEqual(field.write(dt), b'2007,199~')
     #8
     orig = b'2009,074,12~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2009, 3, 15, 12))
     self.assertEqual(field.write(dt), orig)
     #9
     orig = b'2008,358,01:30:22.0012~'
     dt = field.read(compatibility.BytesIO(orig))
      self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 1200))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358,00:00:22~'
     dt = field.read(compatibility.BytesIO(orig))
      self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0, 0, 22, 0))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358,00:30~'
     dt = field.read(compatibility.BytesIO(orig))
      self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0, 30, 0, 0))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358,01~'
     dt = field.read(compatibility.BytesIO(orig))
      self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 0, 0, 0))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358~'
     dt = field.read(compatibility.BytesIO(orig))
     self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0, 0, 0, 0))
     self.assertEqual(field.write(dt), orig)
     #
     orig = b'2008,358,01:30:22.5~'
     dt = field.read(compatibility.BytesIO(orig))
      self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 500000))
     self.assertEqual(field.write(dt), b'2008,358,01:30:22.5000~')
コード例 #51
0
ファイル: core.py プロジェクト: FMassin/obspy
    def _parse_origin(self, line):
        # 1-10    i4,a1,i2,a1,i2    epicenter date (yyyy/mm/dd)
        # 12-22   i2,a1,i2,a1,f5.2  epicenter time (hh:mm:ss.ss)
        time = UTCDateTime.strptime(line[:17], '%Y/%m/%d %H:%M:')
        time += float(line[17:22])
        # 23      a1    fixed flag (f = fixed origin time solution, blank if
        #                           not a fixed origin time)
        time_fixed = fixed_flag(line[22])
        # 25-29   f5.2  origin time error (seconds; blank if fixed origin time)
        time_error = float_or_none(line[24:29])
        time_error = time_error and QuantityError(uncertainty=time_error)
        # 31-35   f5.2  root mean square of time residuals (seconds)
        rms = float_or_none(line[30:35])
        # 37-44   f8.4  latitude (negative for South)
        latitude = float_or_none(line[36:44])
        # 46-54   f9.4  longitude (negative for West)
        longitude = float_or_none(line[45:54])
        # 55      a1    fixed flag (f = fixed epicenter solution, blank if not
        #                           a fixed epicenter solution)
        epicenter_fixed = fixed_flag(line[54])
        # 56-60   f5.1  semi-major axis of 90% ellipse or its estimate
        #               (km, blank if fixed epicenter)
        _uncertainty_major_m = float_or_none(line[55:60], multiplier=1e3)
        # 62-66   f5.1  semi-minor axis of 90% ellipse or its estimate
        #               (km, blank if fixed epicenter)
        _uncertainty_minor_m = float_or_none(line[61:66], multiplier=1e3)
        # 68-70   i3    strike (0 <= x <= 360) of error ellipse clock-wise from
        #                       North (degrees)
        _uncertainty_major_azimuth = float_or_none(line[67:70])
        # 72-76   f5.1  depth (km)
        depth = float_or_none(line[71:76], multiplier=1e3)
        # 77      a1    fixed flag (f = fixed depth station, d = depth phases,
        #                           blank if not a fixed depth)
        # note: this is the depth fixed flag, distinct from the epicenter
        # fixed flag parsed above
        _depth_fixed = fixed_flag(line[76])
        # 79-82   f4.1  depth error 90% (km; blank if fixed depth)
        depth_error = float_or_none(line[78:82], multiplier=1e3)
        # 84-87   i4    number of defining phases
        used_phase_count = int_or_none(line[83:87])
        # 89-92   i4    number of defining stations
        used_station_count = int_or_none(line[88:92])
        # 94-96   i3    gap in azimuth coverage (degrees)
        azimuthal_gap = float_or_none(line[93:96])
        # 98-103  f6.2  distance to closest station (degrees)
        minimum_distance = float_or_none(line[97:103])
        # 105-110 f6.2  distance to furthest station (degrees)
        maximum_distance = float_or_none(line[104:110])
        # 112     a1    analysis type: (a = automatic, m = manual, g = guess)
        evaluation_mode, evaluation_status = \
            evaluation_mode_and_status(line[111])
        # 114     a1    location method: (i = inversion, p = pattern
        #                                 recognition, g = ground truth, o =
        #                                 other)
        location_method = LOCATION_METHODS[line[113].strip().lower()]
        # 116-117 a2    event type:
        # XXX event type and event type certainty are specified per origin,
        # XXX not sure how to best handle this; for now only use it if the
        # XXX information on the individual origins does not clash. Not sure
        # XXX yet how to identify the preferred origin.
        event_type, event_type_certainty = \
            EVENT_TYPE_CERTAINTY[line[115:117].strip().lower()]
        # 119-127 a9    author of the origin
        author = line[118:127].strip()
        # 129-136 a8    origin identification
        origin_id = self._construct_id(['origin', line[128:136].strip()])

        # do some combinations
        depth_error = depth_error and dict(uncertainty=depth_error,
                                           confidence_level=90)
        if all(v is not None for v in (_uncertainty_major_m,
                                       _uncertainty_minor_m,
                                       _uncertainty_major_azimuth)):
            origin_uncertainty = OriginUncertainty(
                min_horizontal_uncertainty=_uncertainty_minor_m,
                max_horizontal_uncertainty=_uncertainty_major_m,
                azimuth_max_horizontal_uncertainty=_uncertainty_major_azimuth,
                preferred_description='uncertainty ellipse',
                confidence_level=90)
            # event init always sets an empty QuantityError, even when
            # specifying None, which is strange
            for key in ['confidence_ellipsoid']:
                setattr(origin_uncertainty, key, None)
        else:
            origin_uncertainty = None
        origin_quality = OriginQuality(
            standard_error=rms, used_phase_count=used_phase_count,
            used_station_count=used_station_count, azimuthal_gap=azimuthal_gap,
            minimum_distance=minimum_distance,
            maximum_distance=maximum_distance)
        comments = []
        if location_method:
            comments.append(
                self._make_comment('location method: ' + location_method))
        if author:
            creation_info = CreationInfo(author=author)
        else:
            creation_info = None
        # assemble whole event
        origin = Origin(
            time=time, resource_id=origin_id, longitude=longitude,
            latitude=latitude, depth=depth, depth_errors=depth_error,
            origin_uncertainty=origin_uncertainty, time_fixed=time_fixed,
            epicenter_fixed=epicenter_fixed, origin_quality=origin_quality,
            comments=comments, creation_info=creation_info)
        # event init always sets an empty QuantityError, even when specifying
        # None, which is strange
        for key in ('time_errors', 'longitude_errors', 'latitude_errors',
                    'depth_errors'):
            setattr(origin, key, None)
        return origin, event_type, event_type_certainty
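The fixed-width parsing above leans on helpers such as float_or_none and int_or_none that are defined elsewhere in the module. A hypothetical sketch of the convention they appear to follow (blank field becomes None, optional unit multiplier), for illustration only:

def float_or_none(text, multiplier=1.0):
    # Hypothetical sketch of the helper used above; the real implementation
    # lives elsewhere in the module. Blank fixed-width fields become None,
    # and multiplier=1e3 is how km fields are converted to metres.
    text = text.strip()
    if not text:
        return None
    return float(text) * multiplier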
コード例 #52
0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 28 14:26:44 2017

@author: horas
"""
from obspy import UTCDateTime
a = 7.3282395e+05

time = UTCDateTime(a)

print time
print time.date
print time.year
print time.month
print time.day
print time.time
print time.hour
print time.minute
print time.second
print time.microsecond
print '\n'
print time.julday
print time.timestamp
print time.weekday

a = 2
b = 3
print("%f\t%f" % (a, b))
コード例 #53
0
 def __init__(self, start, end):
     self.start = UTCDateTime(start)
     self.end = UTCDateTime(end)
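A small illustrative sketch (editorial addition): because UTCDateTime coerces common time representations, a window object like the one above can be built from strings, POSIX timestamps or datetime objects interchangeably.

# Editorial sketch: three equivalent ways of expressing the same instant.
import datetime
from obspy import UTCDateTime

assert UTCDateTime("2016-01-01T00:00:00") == UTCDateTime(1451606400.0)
assert UTCDateTime(1451606400.0) == UTCDateTime(datetime.datetime(2016, 1, 1))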
コード例 #54
0
ファイル: test_utils.py プロジェクト: niowniow/obsplus
 def picks(self):
     t1, t2 = UTCDateTime("2016-01-01"), UTCDateTime("2015-01-01")
     picks = [ev.Pick(time=t1), ev.Pick(time=t2), ev.Pick()]
     return picks
コード例 #55
0
ファイル: test_utcdatetime.py プロジェクト: avuan/obspy
 def test_formatSEED(self):
     """
     Tests formatSEED method
     """
     # 1
     dt = UTCDateTime("2010-01-01")
     self.assertEqual(dt.formatSEED(compact=True), "2010,001")
     # 2
     dt = UTCDateTime("2010-01-01T00:00:00.000000")
     self.assertEqual(dt.formatSEED(compact=True), "2010,001")
     # 3
     dt = UTCDateTime("2010-01-01T12:00:00")
     self.assertEqual(dt.formatSEED(compact=True), "2010,001,12")
     # 4
     dt = UTCDateTime("2010-01-01T12:34:00")
     self.assertEqual(dt.formatSEED(compact=True), "2010,001,12:34")
     # 5
     dt = UTCDateTime("2010-01-01T12:34:56")
     self.assertEqual(dt.formatSEED(compact=True), "2010,001,12:34:56")
     # 6
     dt = UTCDateTime("2010-01-01T12:34:56.123456")
     self.assertEqual(dt.formatSEED(compact=True),
                      "2010,001,12:34:56.1234")
     # 7 - explicit disabling compact flag still results into compact date
     # if no time information is given
     dt = UTCDateTime("2010-01-01")
     self.assertEqual(dt.formatSEED(compact=False), "2010,001")
コード例 #56
0
ファイル: getIIdata.py プロジェクト: aringler-usgs/getIIdata
class GetIIData(object):
	# initialize input vars
	def __init__(self, year, startday, network, **kwargs):
		# initialize year/start/net
		# if statement to check for main args set QUERY=True	
		# else sys.exit(1)
		if (year != "") and (startday != "") and (network != ""):	
			self.year = year
			self.startday = startday
			self.network = network
			QUERY = True
		else:
			QUERY = False

		# loop through **kwargs and initialize optargs
		self.endday = ""	# init endday string
		self.station = "" 	# init station string
		self.location = ""	# init location string
		self.channel = ""	# init channel string
		self.debug = False	# init debug
		self.archive = False	# init archive
		endday = self.endday
		for key,val in kwargs.iteritems(): 
			if key == "endday": self.endday = val
			elif key == "station": self.station = val
			elif key == "location": self.location = val
			elif key == "channel": self.channel = val
			elif key == "debug": self.debug = self.toBool(val)
			elif key == "archive": self.archive = self.toBool(val) 

		# print arguments if 'debug' mode
		if self.debug:
			print "Year: " + self.year
			print "Start Day: " + self.startday
			print "End Day: " + self.endday
			print "Network: " + self.network
			print "Station: " + self.station
			print "Location: " + self.location
			print "Channel: " + self.channel

		# handle wildcards
		if self.location == "?":
			self.location = "*"
		if self.channel == "?":
			self.channel = "*"
		if self.station == "?":
			self.station = "*"

		# set start/end to UTCDateTime object
		#--------------------------------------------------------------------
		self.startTime = UTCDateTime(year + startday +"T00:00:00.000")
		# If no end day in parser default to 1 day
		if self.endday == "?":
			self.endday = str(int(self.startday) + 1).zfill(3)
			self.endTime = self.startTime + 24*60*60
		else:
			self.endTime = UTCDateTime(year + self.endday +"T00:00:00.000")
		print "Here is our start time: " + self.startTime.formatIRISWebService()
		print "Here is our end time:   " + self.endTime.formatIRISWebService()
		self.days = int(self.endday)- int(self.startday)
		# there are 24, 1 hour increments in a day
		self.hours = (int(self.endday)- int(self.startday)) * 24 
		# Will only run if main args are given
		# check QUERY flag if True continue
		if QUERY:
			self.queryData()
		else:
			print '\nNo main args given.'
			print 'Exiting\n'
			sys.exit(1)
	
	def queryData(self):
		# code from IRIS client 
		# Here we pull the data
		client = Client("IRIS")
		DupStations = []
		DupLocations = []
		DupChannels = []
		self.st = Stream()
		self.STAWILD = False
		self.LOCWILD = False
		self.CHANWILD = False
		
		try:
			timeout = 300
			socket.setdefaulttimeout(timeout)
			# this needs to have a get_waveform that queries data 1 hour at a time
			# the data can't be queried all at once if the request is too bulky
			# also needs to include a timeout exception
			for hourIndex in range(0,self.hours): # this can't be days... has to be hours
				self.startTime1 = self.startTime + (hourIndex)*1*60*60
				self.endTime1 = self.startTime + (hourIndex+1)*1*60*60
				requestArray = [(self.network,self.station,self.location, \
					self.channel,self.startTime1,self.endTime1)]
				self.st1 = client.get_waveforms_bulk(requestArray)
				self.st += self.st1
				print self.st	
				print
				#self.st = client.get_waveforms_bulk(timeout=10,requestArray)
				
			for self.tr in self.st:
				#Here we remove the M data quality and go with D
				self.tr.stats.mseed['dataquality'] = 'D'
				if self.debug:
					if self.station == '*':
						self.STAWILD = True
						DupStations.append(self.tr.stats.station)
					elif self.station != '*':
						self.STAWILD = False

					if self.location == '*':
						self.LOCWILD = True
						DupLocations.append(self.tr.stats.location)
					elif self.location != '*':
						self.LOCWILD = False

					if self.channel == '*':
						self.CHANWILD = True
						DupChannels.append(self.tr.stats.channel)
					elif self.channel != '*':
						self.CHANWILD = False
		#except TimeoutError:
			#print 'Get waveform timeout, exiting...'
			#sys.exit(0)	
		except:
			print 'Trouble getting data'
			sys.exit(0)
		
		# Takes duplicate stations out of list and 
		# makes station, location, and channel into an array 
		# for looping( probably easier way but it works)
		self.stations = list(set(DupStations))
		if self.station != '*':
			self.stations.append(self.station)
		self.locations = list(set(DupLocations))
		if self.location != '*':
			self.locations.append(self.location)
		self.channels = list(set(DupChannels))
		if self.channel != '*':	
			self.channels.append(self.channel)
		print
		print "Station(s) being pulled: " + str(self.stations)
		print "Location(s) being pulled: " + str(self.locations)
		print "Channel(s) being pulled: " + str(self.channels)
			
		# Now call code to store streams in mseed files
		self.storeMSEED()
	
	def storeMSEED(self):
		#Main program
		#code for storing MSEED files
		codepath = '/home/mkline/dev/getIIdataBackup/TEST_ARCHIVE/'
		self.stFinal = Stream()
		for self.channel in self.channels:
			self.trace2 = self.st.select(channel = self.channel)
			for self.location in self.locations:
				self.trace1 = self.trace2.select(location = self.location)
				for self.station in self.stations:
					print
					print "For station, location, and channel: " \
						+ self.station +" "+ self.location +" "+ self.channel
					trace = self.trace1.select(station = self.station)
					trace.merge()
					trace.sort()
					trace.count()
					for dayIndex in range(0,self.days):
						print "Day properties: "
						#startTime works better than trace[0].stats.starttime
						trimStart = self.startTime + (dayIndex)*24*60*60
						trimEnd = self.startTime + (dayIndex+1)*24*60*60
						print "Start of day: " + str(trimStart)
						print "End of day:   " + str(trimEnd)
						#Converting date into julian day to store in directory
						timesplit = re.split('T', str(trimStart))
						s = timesplit[0]
						fmt = '%Y-%m-%d'
						dt = datetime.datetime.strptime(s, fmt)
						tt = dt.timetuple()
						NewStartDay = str(tt.tm_yday).zfill(3)
						self.stFinal = trace.copy()
						self.stFinal.trim(starttime = trimStart, endtime = trimEnd)
						# This if statement is used to make sure traces with no
						# data don't get added to the directory structure
						if not self.stFinal or str(self.stFinal[0].max()) == '--':
							print "No trace for given day"
						else:
							#Added the directory structures in here since you won't want to
							#add directory structures that you don't use
							self.stFinal = self.stFinal.split()
							if not os.path.exists(codepath + self.network + '_' + self.station  + '/'):
								os.mkdir(codepath + self.network + '_' + self.station  + '/')
							if not os.path.exists(codepath + self.network + '_' + self.station  + '/' \
								+ self.year + '/'):
								os.mkdir(codepath + self.network + '_' + self.station  + '/' \
								+ self.year + '/')
							stpath = codepath + self.network + '_' + self.station  + '/' + self.year + \
								'/' + self.year + '_' + NewStartDay + '/'
							if not os.path.exists(stpath):
								os.mkdir(stpath)
							# Here we write the data using STEIM 2 and 512 record lengths
							self.stFinal.write(stpath + self.stFinal[0].stats.location + '_' + \
								self.stFinal[0].stats.channel + '.512.seed', format='MSEED', \
								reclen = 512, encoding='STEIM2')
							print self.stFinal
						
							

	# convert optional boolean strings to boolean vars
	def toBool(self, value):
		"""
		Converts 'string' to boolean. Raises exception for invalid formats
			True values: 1, True, true, "1", "True", "true", "yes", "y", "t"
			False values: 0, False, false, "0", "False", "false", "no", "n", "f" 
		"""
		if str(value).lower() in ("true", "yes", "t", "y", "1"): return True
		if str(value).lower() in ("false", "no", "f", "n", "0"): return False
		raise Exception('Invalid value for boolean conversion: ' + str(value))
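A hedged usage sketch (editorial addition): constructing the class immediately runs queryData() and storeMSEED(), so this would issue a live IRIS request and write to the hard-coded archive path. The network, station and day values are placeholders.

# Editorial sketch, illustrative only: instantiation triggers the query and
# the archiving right away. Network/station/day values are placeholders.
fetcher = GetIIData("2015", "001", "II",
                    station="PFO", location="00", channel="BHZ",
                    endday="002", debug="true", archive="false")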
コード例 #57
0
ファイル: findpeaks.py プロジェクト: cjhopp/scripts
def find_peaks2(arr, thresh, trig_int, debug=0, maxwidth=10,
                starttime=False, samp_rate=1.0):
    r"""Function to determine peaks in an array of data using scipy \
    find_peaks_cwt, works fast in certain cases, but for match_filter cccsum \
    peak finding, find_peaks2_short works better.  Test it out and see which \
    works best for your application.

    :type arr: ndarray
    :param arr: 1-D numpy array is required
    :type thresh: float
    :param thresh: The threshold below which will be considered noise and \
    peaks will not be found in.
    :type trig_int: int
    :param trig_int: The minimum difference in samples between triggers, \
    if multiple peaks within this window this code will find the highest.
    :type debug: int
    :param debug: Optional, debug level 0-5
    :type maxwidth: int
    :param maxwidth: Maximum peak width to look for in samples
    :type starttime: obspy.UTCDateTime
    :param starttime: Starttime for plotting, only used if debug > 2.
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.

    :return: peaks: Lists of tuples of peak values and locations.
    """
    from scipy.signal import find_peaks_cwt
    import numpy as np
    from obspy import UTCDateTime
    if not starttime:
        starttime = UTCDateTime(0)
    # Set everything below the threshold to zero
    image = np.copy(arr)
    image = np.abs(image)
    image[image < thresh] = thresh
    # We need to check if the number of samples in the image is prime, if it
    # is this method will be really slow, so we add a pad to the end to make
    # it not of prime length!
    if is_prime(len(image)):
        image = np.append(image, 0.0)
        print 'Input array has a prime number of samples, appending a zero'
        print len(image)
    if len(image[image > thresh]) == 0:
        print 'No values over threshold found'
        return []
    if debug > 0:
        msg = ' '.join(['Found', str(len(image[image > thresh])),
                        'samples above the threshold'])
        print msg
    initial_peaks = []
    peaks = []
    # Find the peaks
    print 'Finding peaks'
    peakinds = find_peaks_cwt(image, np.arange(1, maxwidth))
    initial_peaks = [(image[peakind], peakind) for peakind in peakinds]
    # Sort initial peaks according to amplitude
    print 'sorting peaks'
    peaks_sort = sorted(initial_peaks, key=lambda amplitude: amplitude[0],
                        reverse=True)
    if debug >= 4:
        for peak in initial_peaks:
            print peak
    if initial_peaks:
        peaks.append(peaks_sort[0])  # Definitely take the biggest peak
        if debug > 3:
            msg = ' '.join(['Added the biggest peak of', str(peaks[0][0]),
                            'at sample', str(peaks[0][1])])
            print msg
        if len(initial_peaks) > 1:
            if debug > 3:
                msg = ' '.join(['Multiple peaks found, checking them',
                                'now to see if they overlap'])
                print msg
            for next_peak in peaks_sort:
                # i in xrange(1,len(peaks_sort)):
                # Loop through the amplitude sorted peaks
                # if the next highest amplitude peak is within trig_int of any
                # peak already in peaks then we don't want it, else, add it
                # next_peak = peaks_sort[i]
                if debug > 3:
                    print next_peak
                for peak in peaks:
                    add = False
                    # Use add as a switch for whether or not to append
                    # next peak to peaks, if once gone through all the peaks
                    # it is True, then we will add it, otherwise we won't!
                    if abs(next_peak[1] - peak[1]) < trig_int:
                        if debug > 3:
                            msg = ' '.join(['Difference in time is',
                                            str(next_peak[1] - peak[1]), '\n'
                                            'Which is less than',
                                            str(trig_int)])
                            print msg
                        add = False
                        # Need to exit the loop here if false
                        break
                    else:
                        add = True
                if add:
                    if debug > 3:
                        msg = ' '.join(['Adding peak of', str(next_peak[0]),
                                        'at sample', str(next_peak[1])])
                        print msg
                    peaks.append(next_peak)
                elif debug > 3:
                    msg = ' '.join(['I did not add peak of',
                                    str(next_peak[0]), 'at sample',
                                    str(next_peak[1])])
                    print msg

        if debug >= 3:
            from eqcorrscan.utils import EQcorrscan_plotting
            _fname = ''.join(['peaks_',
                              starttime.datetime.strftime('%Y-%m-%d'),
                              '.pdf'])
            print ' '.join(['Saving plot to', _fname])
            EQcorrscan_plotting.peaks_plot(image, starttime, samp_rate, True,
                                           peaks, _fname)
        peaks = sorted(peaks, key=lambda time: time[1], reverse=False)
        return peaks
    else:
        print 'No peaks for you!'
        return peaks
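A hedged usage sketch (editorial addition): calling find_peaks2 on a synthetic trace with two well-separated Gaussian bumps. It assumes this function and its is_prime() helper are importable from the same module.

# Editorial sketch: synthetic input for find_peaks2 (assumes the function and
# its is_prime() helper are importable together).
import numpy as np

x = np.arange(2000, dtype=float)
arr = (8.0 * np.exp(-0.5 * ((x - 500.0) / 5.0) ** 2) +
       6.5 * np.exp(-0.5 * ((x - 1200.0) / 5.0) ** 2))
peaks = find_peaks2(arr, thresh=4.0, trig_int=50)
print(peaks)  # should report (amplitude, sample) pairs near samples 500 and 1200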
コード例 #58
0
def analyze_data(families, staloc, tbegin, tend, \
    freq0, type_threshold, threshold, ncpu, icpu):
    """
    """
    nfamilies = int(ceil(len(families) / ncpu))
    ibegin = icpu * nfamilies
    iend = min((icpu + 1) * nfamilies, len(families))

    # Loop on families
    for i in range(ibegin, iend):

        # Create directory to store the LFEs times
        namedir = 'LFEs/' + families['family'].iloc[i]
        if not os.path.exists(namedir):
            os.makedirs(namedir)

        # File to write number of stations
        namedir = 'nstations'
        if not os.path.exists(namedir):
            os.makedirs(namedir)
        stationfile = 'nstations/' + families['family'].iloc[i] + '.txt'

        # Create dataframe to store LFE times
        df = pd.DataFrame(columns=['year', 'month', 'day', 'hour', \
            'minute', 'second', 'cc', 'nchannel'])

        # Read the templates
        stations = families['stations'].iloc[i].split(',')
        templates = Stream()
        orientations = []
        names = []
        for station in stations:
            subset = staloc.loc[staloc['station'] == station]
            channels = subset['channels'].iloc[0]
            mychannels = channels.split(',')
            for channel in mychannels:
                data = pickle.load(open(template_dir + '/' + \
                    families['family'].iloc[i] + '/' + station + '_' + \
                    channel + '.pkl', 'rb'))
                template = data[0]
                angle = data[1]
                templates.append(template)
                orientations.append(angle)
                names.append(station + '_' + channel)

        # Check the time step of the stations
        subset = staloc.loc[staloc['station'].isin(stations)]
        if len(subset['dt'].value_counts()) == 1:
            dt = subset['dt'].iloc[0]
        else:
            raise ValueError('All stations must have the same time step')

        # Number of hours of data to analyze
        t1 = UTCDateTime(year=tbegin[0], month=tbegin[1], \
            day=tbegin[2], hour=tbegin[3], minute=tbegin[4], \
            second=tbegin[5])
        t2 = UTCDateTime(year=tend[0], month=tend[1], \
            day=tend[2], hour=tend[3], minute=tend[4], \
            second=tend[5])
        nhour = int(ceil((t2 - t1) / 3600.0))
        duration = families['duration'].iloc[i]

        # To rotate components
        swap = {'E': 'N', 'N': 'E', '1': '2', '2': '1'}

        # Loop on hours of data
        for hour in range(0, nhour):
            Tstart = t1 + hour * 3600.0
            Tend = t1 + (hour + 1) * 3600.0 + duration
            delta = Tend - Tstart
            ndata = int(delta / dt) + 1

            # Get the data
            data = []
            for station in stations:
                subset = staloc.loc[staloc['station'] == station]
                channels = subset['channels'].iloc[0]
                mychannels = channels.split(',')
                for num, channel in enumerate(mychannels):
                    try:
                        D = read('tmp/' + station + '_' + channel + '.mseed')
                        D = D.slice(Tstart, Tend)

                        if (type(D) == obspy.core.stream.Stream):
                            namefile = 'tmp/' + station + '_' + channel + \
                                '.pkl'
                            orientation = pickle.load(open(namefile, 'rb')) \
                                [num]
                            index = names.index(station + '_' + channel)
                            reference = orientations[index]

                            # Rotate components
                            if (len(mychannels) > 1) and (num < 2):
                                if orientation != reference:
                                    channel_new = channel[0:2] + \
                                        swap[channel[2]]
                                    D_new = read('tmp/' + station + '_' + \
                                        channel_new + '.mseed')
                                    D_new = D_new.slice(Tstart, Tend)
                                    namefile = 'tmp/' + station + '_' + \
                                        channel_new + '.pkl'
                                    if num == 0:
                                        orientation_new = pickle.load(open( \
                                            namefile, 'rb'))[1]
                                    else:
                                        orientation_new = pickle.load(open( \
                                            namefile, 'rb'))[0]
                                    index = names.index(station + '_' + \
                                        channel_new)
                                    reference_new = orientations[index]
                                    if channel[2] in ['E', '1']:
                                        D = rotate_data(D, D_new, \
                                            orientation, orientation_new, \
                                            reference, reference_new, 'E')
                                    else:
                                        D = rotate_data(D_new, D, \
                                            orientation_new, orientation, \
                                            reference_new, reference, 'N')

                            # Append stream to data
                            data.append(D)
                    except:
                        message = 'No data available for station {}'.format( \
                            station) + ' and channel {}'.format(channel) + \
                            ' at time {}/{}/{} - {}:{}:{}\n'.format( \
                            Tstart.year, Tstart.month, Tstart.day, \
                            Tstart.hour, Tstart.minute, Tstart.second)
                        print(message)

            # Loop on channels
            nchannel = 0
            for j in range(0, len(data)):
                subdata = data[j]
                # Check whether we have a complete one-hour-long recording
                if (len(subdata) == 1):
                    if (len(subdata[0].data) == ndata):
                        # Get the template
                        station = subdata[0].stats.station
                        channel = subdata[0].stats.channel
                        template = templates.select(station=station, \
                            channel=channel)[0]
                        # Cross correlation
                        cctemp = correlate.optimized(template, subdata[0])
                        if (nchannel > 0):
                            cc = np.vstack((cc, cctemp))
                        else:
                            cc = cctemp
                        nchannel = nchannel + 1

            # Write number of channels
            with open(stationfile, 'a') as file:
                file.write('{} {} {} {} {}\n'.format(Tstart.year, \
                    Tstart.month, Tstart.day, Tstart.hour, nchannel))

            if (nchannel > 0):
                # Compute average cross-correlation across channels
                if len(np.shape(cc)) == 1:
                    meancc = cc
                else:
                    meancc = np.mean(cc, axis=0)
                if (type_threshold == 'MAD'):
                    MAD = np.median(np.abs(meancc - np.mean(meancc)))
                    index = np.where(meancc >= threshold * MAD)
                elif (type_threshold == 'Threshold'):
                    index = np.where(meancc >= threshold)
                else:
                    raise ValueError( \
                        'Type of threshold must be MAD or Threshold')
                times = np.arange(0.0, np.shape(meancc)[0] * dt, dt)

                # Get LFE times
                if np.shape(index)[1] > 0:
                    (time, cc) = clean_LFEs(index, times, meancc, dt, freq0)

                    # Add LFE times to dataframe
                    i0 = len(df.index)
                    for j in range(0, len(time)):
                        timeLFE = Tstart + time[j]
                        df.loc[i0 + j] = [int(timeLFE.year), \
                            int(timeLFE.month), int(timeLFE.day), \
                            int(timeLFE.hour), int(timeLFE.minute), \
                            timeLFE.second + timeLFE.microsecond / 1000000.0, \
                            cc[j], nchannel]

        # Add to pandas dataframe and save
        namefile = 'LFEs/' + families['family'].iloc[i] + '/catalog.pkl'
        if os.path.exists(namefile):
            df_all = pickle.load(open(namefile, 'rb'))
            df_all = pd.concat([df_all, df], ignore_index=True)
        else:
            df_all = df
        df_all = df_all.astype(dtype={'year':'int32', 'month':'int32', \
            'day':'int32', 'hour':'int32', 'minute':'int32', \
            'second':'float', 'cc':'float', 'nchannel':'int32'})
        pickle.dump(df_all, open(namefile, 'wb'))
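A hedged usage sketch (editorial addition): how analyze_data might be called for one worker out of several. The CSV file names and column layout are assumptions based on the columns referenced above, and template_dir plus the tmp/ miniSEED files must already be in place.

# Editorial sketch (assumptions: the CSV files exist with the columns used
# above, and template_dir / tmp/*.mseed are already prepared).
import pandas as pd

families = pd.read_csv('families.csv')         # 'family', 'stations', 'duration', ...
staloc = pd.read_csv('station_locations.csv')  # 'station', 'channels', 'dt', ...

# Analyse 2018-01-01 00:00 to 2018-01-02 00:00 with the first of 4 workers.
analyze_data(families, staloc,
             tbegin=(2018, 1, 1, 0, 0, 0), tend=(2018, 1, 2, 0, 0, 0),
             freq0=1.5, type_threshold='MAD', threshold=8.0,
             ncpu=4, icpu=0)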
コード例 #59
0
ファイル: build_dataset.py プロジェクト: stbnps/Cospy
import sys
from time import time

from obspy import UTCDateTime

from aux_functions import download_data

start_time_string = sys.argv[1]
end_time_string = sys.argv[2]
base_url = sys.argv[3]
phase = sys.argv[4]

# start_time_string = '2017-01-01'
# end_time_string = '2017-01-03'
# base_url = 'NCEDC'
# phase = 'P'

data_path = 'data_' + phase.lower() + '/'

start_time = UTCDateTime(start_time_string)
end_time = UTCDateTime(end_time_string)

time_window = 60
request_all_channels = True
# base_url = 'NCEDC'

waves = []
times = []

stride = 86400
st = start_time

timer_start = time()

while st < end_time:
コード例 #60
0
ファイル: getIIdata.py プロジェクト: mkline-usgs/getIIdata
class GetIIData(object):
	# initialize input vars
	def __init__(self, year, startday, network, **kwargs):
		# initialize year/start/net
		# if statement to check for main args set QUERY=True	
		# else sys.exit(1)
		if (year != "") and (startday != "") and (network != ""):	
			self.year = year
			self.startday = startday
			self.network = network
			QUERY = True
		else:
			QUERY = False

		# loop through **kwargs and initialize optargs
		self.endday = ""	# init endday string
		self.station = "" 	# init station string
		self.location = ""	# init location string
		self.channel = ""	# init channel string
		self.debug = False	# init debug
		self.archive = False	# init archive
		endday = self.endday
		for key,val in kwargs.iteritems(): 
			if key == "endday": self.endday = val
			elif key == "station": self.station = val
			elif key == "location": self.location = val
			elif key == "channel": self.channel = val
			elif key == "debug": self.debug = self.toBool(val)
			elif key == "archive": self.archive = self.toBool(val) 

		# print arguments if 'debug' mode
		if self.debug:
			print "Year: " + self.year
			print "Start Day: " + self.startday
			print "End Day: " + self.endday
			print "Network: " + self.network
			print "Station: " + self.station
			print "Location: " + self.location
			print "Channel: " + self.channel

		# handle wildcards
		if self.location == "?":
			self.location = "*"
		if self.channel == "?":
			self.channel = "*"
		if self.station == "?":
			self.station = "*"

		# set start/end to UTCDateTime object
		#--------------------------------------------------------------------
		self.startTime = UTCDateTime(year + startday +"T00:00:00.000")
		#If no end day in parser default to 1 day
		if self.endday == "?":
			self.endTime = self.startTime + 24*60*60
		else:
			self.endTime = UTCDateTime(year + self.endday +"T00:00:00.000")
		print "Here is our start time " + self.startTime.formatIRISWebService()
		print "Here is our end time   " + self.endTime.formatIRISWebService()
		# Will only run if main args are given
		# check QUERY flag if True continue
		if QUERY:
			self.queryData()
		else:
			print '\nNo main args given.'
			print 'Exiting\n'
			sys.exit(1)
	
	def queryData(self):
		# code from IRIS client 
		#Here we pull the data
		client = Client("IRIS")
		DupStations = []
		DupLocations = []
		DupChannels = []
		self.STAWILD = False
		self.LOCWILD = False
		self.CHANWILD = False
		try:
			requestArray = [(self.network,self.station,self.location, \
				self.channel,self.startTime,self.endTime)]
			print
			if self.debug:
				print(requestArray)
				print 
			self.st = client.get_waveforms_bulk(requestArray)
			for self.tr in self.st:
				#Here we remove the M data quality and go with D
				self.tr.stats.mseed['dataquality'] = 'D'
				if self.debug:
					#print "Here is a trace we have"
					#print(tr.stats)
					if self.station == '*':
						self.STAWILD = True
						DupStations.append(self.tr.stats.station)
					elif self.station != '*':
						self.STAWILD = False

					if self.location == '*':
						self.LOCWILD = True
						DupLocations.append(self.tr.stats.location)
					elif self.location != '*':
						self.LOCWILD = False

					if self.channel == '*':
						self.CHANWILD = True
						DupChannels.append(self.tr.stats.channel)
					elif self.channel != '*':
						self.CHANWILD = False
		except:
			print 'Trouble getting data'
			sys.exit(0)
		#takes duplicate stations out of list
		self.stations = list(set(DupStations))
		self.locations = list(set(DupLocations))
		self.channels = list(set(DupChannels))
		print self.stations
		print self.locations
		print self.channels
		# Now call code to store streams in mseed files
		self.storeMSEED()
	
	#LAST THING TO DO!!!!
	def storeMSEED(self):
		#code for storing MSEED files
		#Need to check if the directories exist and if not make them
		#Main program
		codepath = '/home/mkline/dev/getIIdata/TEST_ARCHIVE/'
		self.days = int(round((self.st[-1].stats.endtime \
			- self.st[0].stats.starttime)/(24*60*60)))
		self.stFinal = Stream()

		if self.STAWILD:
			for self.station in self.stations:
				print
				print "For station: " + self.station
				trace = self.st.select(station = self.station)
				trace.merge()
				trace.sort()
				trace.count()
				for dayIndex in range(0,self.days):
					print "Day properties: "
					#startTime works better than trace[0].stats.starttime
					trimStart = self.startTime + (dayIndex)*24*60*60
					trimEnd = self.startTime + (dayIndex+1)*24*60*60
					print "Start of day: " + str(trimStart)
					print "End of day:   " + str(trimEnd)
					#Converting date into julian day
					timesplit = re.split('T', str(trimStart))
					s = timesplit[0]
					fmt = '%Y-%m-%d'
					dt = datetime.datetime.strptime(s, fmt)
					tt = dt.timetuple()
					if tt.tm_yday < 10:
						NewStartDay = '00' + str(tt.tm_yday)
					elif tt.tm_yday < 100:
						NewStartDay = '0' + str(tt.tm_yday)
					else:
						NewStartDay = str(tt.tm_yday)
					self.stFinal = trace.copy()
					self.stFinal.trim(starttime = trimStart, endtime = trimEnd)	
					self.stFinal = self.stFinal.split()
					if not self.stFinal:
						print "No trace for given day"
					else:
						#Added the directory structures in here since you won't want to
						#add directory structures that you don't use
						if not os.path.exists(codepath + self.network + '_' + self.station  + '/'):
							os.mkdir(codepath + self.network + '_' + self.station  + '/')
						if not os.path.exists(codepath + self.network + '_' + self.station  + '/' \
							+ self.year + '/'):
							os.mkdir(codepath + self.network + '_' + self.station  + '/' \
							+ self.year + '/')
						stpath = codepath + self.network + '_' + self.station  + '/' + self.year + \
							'/' + self.year + '_' + NewStartDay + '/'
						if not os.path.exists(stpath):
							os.mkdir(stpath)
						# Here we write the data using STEIM 2 and 512 record lengths
						self.stFinal.write(stpath + self.stFinal[0].stats.location + '_' + \
							self.stFinal[0].stats.channel + '.512.seed', format='MSEED', \
							reclen = 512, encoding='STEIM2')
						print self.stFinal

		elif self.LOCWILD:
			for self.location in self.locations:
				print
				print "For station: " + self.station
				trace = self.st.select(location = self.location)
				trace.merge()
				trace.sort()
				trace.count()
				for dayIndex in range(0,self.days):
					print "Day properties: "
					#startTime works better than trace[0].stats.starttime
					trimStart = self.startTime + (dayIndex)*24*60*60
					trimEnd = self.startTime + (dayIndex+1)*24*60*60
					print "Start of day: " + str(trimStart)
					print "End of day:   " + str(trimEnd)
					#Converting date into julian day
					timesplit = re.split('T', str(trimStart))
					s = timesplit[0]
					fmt = '%Y-%m-%d'
					dt = datetime.datetime.strptime(s, fmt)
					tt = dt.timetuple()
					if tt.tm_yday < 10:
						NewStartDay = '00' + str(tt.tm_yday)
					elif tt.tm_yday < 100:
						NewStartDay = '0' + str(tt.tm_yday)
					else:
						NewStartDay = str(tt.tm_yday)
					self.stFinal = trace.copy()
					self.stFinal.trim(starttime = trimStart, endtime = trimEnd)	
					self.stFinal = self.stFinal.split()
					if not self.stFinal:
						print "No trace for given day"
					else:
						#Added the directory structures in here since you won't want to
						#add directory structures that you don't use
						if not os.path.exists(codepath + self.network + '_' + self.station  + '/'):
							os.mkdir(codepath + self.network + '_' + self.station  + '/')
						if not os.path.exists(codepath + self.network + '_' + self.station  + '/' \
							+ self.year + '/'):
							os.mkdir(codepath + self.network + '_' + self.station  + '/' \
							+ self.year + '/')
						stpath = codepath + self.network + '_' + self.station  + '/' + self.year + \
							'/' + self.year + '_' + NewStartDay + '/'
						if not os.path.exists(stpath):
							os.mkdir(stpath)
						# Here we write the data using STEIM 2 and 512 record lengths
						self.stFinal.write(stpath + self.stFinal[0].stats.location + '_' + \
							self.stFinal[0].stats.channel + '.512.seed', format='MSEED', \
							reclen = 512, encoding='STEIM2')
						print self.stFinal
						

	# convert optional boolean strings to boolean vars
	def toBool(self, value):
		"""
		Converts 'string' to boolean. Raises exception for invalid formats
			True values: 1, True, true, "1", "True", "true", "yes", "y", "t"
			False values: 0, False, false, "0", "False", "false", "no", "n", "f" 
		"""
		if str(value).lower() in ("true", "yes", "t", "y", "1"): return True
		if str(value).lower() in ("false", "no", "f", "n", "0"): return False
		raise Exception('Invalid value for boolean conversion: ' + str(value))