Example #1
def export_sac(db, filename, pair, components, filterid, corr, ncorr=0, sac_format=None, maxlag=None, cc_sampling_rate=None):
    if sac_format is None:
        sac_format = get_config(db, "sac_format")
    if maxlag is None:
        maxlag = float(get_config(db, "maxlag"))
    if cc_sampling_rate is None:
        cc_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    try:
        os.makedirs(os.path.split(filename)[0])
    except:
        pass
    filename += ".SAC"
    mytrace = Trace(data=corr)
    mytrace.stats['station'] = pair
    mytrace.stats['sampling_rate'] = cc_sampling_rate

    st = Stream(traces=[mytrace, ])
    st.write(filename, format='SAC')
    tr = SacIO(filename)
    if sac_format == "doublets":
        tr.SetHvalue('A', 120)
    else:
        tr.SetHvalue('B', -maxlag)
        tr.SetHvalue('DEPMIN', np.min(corr))
        tr.SetHvalue('DEPMAX', np.max(corr))
        tr.SetHvalue('DEPMEN', np.mean(corr))
        tr.SetHvalue('SCALE', 1)
        tr.SetHvalue('NPTS', len(corr))
    tr.WriteSacBinary(filename)
    del st, tr
    return
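
For reference, a minimal self-contained sketch of the core write step in Example #1, leaving out the MSNoise database lookups (get_config, db); the array, station label, sampling rate, and output name below are placeholders:

import numpy as np
from obspy import Trace, Stream

corr = np.zeros(2049, dtype=np.float32)      # placeholder cross-correlation function
tr = Trace(data=corr)
tr.stats.station = "PAIR"                    # placeholder for the station-pair label
tr.stats.sampling_rate = 20.0                # placeholder cc_sampling_rate
Stream(traces=[tr]).write("example_corr.SAC", format="SAC")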
Example #2
def export_sac(db, filename, pair, components, filterid, corr, ncorr=0,
               sac_format=None, maxlag=None, cc_sampling_rate=None):
    if sac_format is None:
        sac_format = get_config(db, "sac_format")
    if maxlag is None:
        maxlag = float(get_config(db, "maxlag"))
    if cc_sampling_rate is None:
        cc_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    try:
        os.makedirs(os.path.split(filename)[0])
    except:
        pass
    filename += ".SAC"
    mytrace = Trace(data=corr)
    mytrace.stats['station'] = pair
    mytrace.stats['sampling_rate'] = cc_sampling_rate
    if maxlag:
        mytrace.stats.starttime = -maxlag
    mytrace.stats.sac = AttribDict()
    mytrace.stats.sac.depmin = np.min(corr)
    mytrace.stats.sac.depmax = np.max(corr)
    mytrace.stats.sac.depmen = np.mean(corr)
    mytrace.stats.sac.scale = 1
    mytrace.stats.sac.npts = len(corr)

    st = Stream(traces=[mytrace, ])
    st.write(filename, format='SAC')
    del st
    return
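
Assuming the file from the sketch above exists, the SAC headers written by ObsPy can be checked on a round trip; a small, illustrative read-back:

from obspy import read

st = read("example_corr.SAC")                # file name from the sketch above
print(st[0].stats.sac.npts, st[0].stats.sac.delta, st[0].stats.sac.b)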
Example #3
 def test_issue193(self):
     """
     Test for issue #193: if non-contiguous array is written correctly.
     """
     warnings.filterwarnings("ignore", "Detected non contiguous data")
     # test all plugins with both read and write method
     formats_write = \
         set(_getEntryPoints('obspy.plugin.waveform', 'writeFormat'))
     formats_read = \
         set(_getEntryPoints('obspy.plugin.waveform', 'readFormat'))
     formats = set.intersection(formats_write, formats_read)
     # mseed will raise exception for int64 data, thus use int32 only
     data = np.arange(10, dtype='int32')
     # make array non-contiguous
     data = data[::2]
     tr = Trace(data=data)
     for format in formats:
         # XXX: skip SEGY and SU formats for now as they need some special
         # headers.
         if format in ['SEGY', 'SU', 'SEG2']:
             continue
         tempfile = NamedTemporaryFile().name
         tr.write(tempfile, format)
         if format == "Q":
             tempfile = tempfile + ".QHD"
         tr_test = read(tempfile, format)[0]
         # clean up
         os.remove(tempfile)
         if format == 'Q':
             os.remove(tempfile[:-4] + '.QBN')
             os.remove(tempfile[:-4])
         np.testing.assert_array_equal(tr.data, tr_test.data)
Example #4
 def test_percent_in_str(self):
     """
     Tests if __str__ method is working with percent sign (%).
     """
     tr = Trace()
     tr.stats.station = '%t3u'
     self.assertTrue(tr.__str__().startswith(".%t3u.. | 1970"))
Example #5
 def test_len(self):
     """
     Tests the __len__ and count methods of the Trace class.
     """
     trace = Trace(data=np.arange(1000))
     self.assertEquals(len(trace), 1000)
     self.assertEquals(trace.count(), 1000)
Example #6
File: test_core.py Project: Brtle/obspy
 def setUp(self):
     # directory where the test files are located
     self.path = os.path.join(os.path.dirname(__file__), 'data')
     self.filename_css = os.path.join(self.path, 'test_css.wfdisc')
     self.filename_nnsa = os.path.join(self.path, 'test_nnsa.wfdisc')
     # set up stream for validation
     header = {}
     header['station'] = 'TEST'
     header['starttime'] = UTCDateTime(1296474900.0)
     header['sampling_rate'] = 80.0
     header['calib'] = 1.0
     header['calper'] = 1.0
     header['_format'] = 'CSS'
     filename = os.path.join(self.path, '201101311155.10.ascii.gz')
     with gzip.open(filename, 'rb') as fp:
         data = np.loadtxt(fp, dtype=np.int_)
     # traces in the test files are sorted ZEN
     st = Stream()
     for x, cha in zip(data.reshape((3, 4800)), ('HHZ', 'HHE', 'HHN')):
         # big-endian copy
         tr = Trace(x, header.copy())
         tr.stats.station += 'be'
         tr.stats.channel = cha
         st += tr
         # little-endian copy
         tr = Trace(x, header.copy())
         tr.stats.station += 'le'
         tr.stats.channel = cha
         st += tr
     self.st_result_css = st.copy()
     for tr in st:
         tr.stats['_format'] = "NNSA_KB_CORE"
     self.st_result_nnsa = st
Example #7
    def test_SacInstCorrection(self):
        # SAC recommends to taper the transfer function if a pure
        # deconvolution is done instead of simulating a different
        # instrument. This test checks the difference between the
        # result from removing the instrument response using SAC or
        # ObsPy. Visual inspection shows that the traces are pretty
        # much identical but differences remain (rms ~ 0.042). Haven't
        # found the cause for those, yet. One possible reason is the
        # floating point arithmetic of SAC vs. the double precision
        # arithmetic of Python. However differences still seem to be
        # too big for that.
        pzf = os.path.join(self.path, 'SAC_PZs_KARC_BHZ')
        sacf = os.path.join(self.path, 'KARC.LHZ.SAC.asc.gz')
        testsacf = os.path.join(self.path, 'KARC_corrected.sac.asc.gz')
        plow = 160.
        phigh = 4.
        fl1 = 1.0 / (plow + 0.0625 * plow)
        fl2 = 1.0 / plow
        fl3 = 1.0 / phigh
        fl4 = 1.0 / (phigh - 0.25 * phigh)
        #Uncomment the following to run the sac-commands
        #that created the testing file
        #if 1:
        #    import subprocess as sp
        #    p = sp.Popen('sac',shell=True,stdin=sp.PIPE)
        #    cd1 = p.stdin
        #    print >>cd1, "r %s"%sacf
        #    print >>cd1, "rmean"
        #    print >>cd1, "rtrend"
        #    print >>cd1, "taper type cosine width 0.03"
        #    print >>cd1, "transfer from polezero subtype %s to none \
        #    freqlimits %f %f %f %f" % (pzf, fl1, fl2, fl3, fl4)
        #    print >>cd1, "w over ./data/KARC_corrected.sac"
        #    print >>cd1, "quit"
        #    cd1.close()
        #    p.wait()

        stats = {'network': 'KA', 'delta': 0.99999988079072466,
                 'station': 'KARC', 'location': 'S1',
                 'starttime': UTCDateTime(2001, 2, 13, 0, 0, 0, 993700),
                 'calib': 1.00868e+09, 'channel': 'BHZ'}
        tr = Trace(np.loadtxt(sacf), stats)

        attach_paz(tr, pzf, tovel=False)
        tr.data = seisSim(tr.data, tr.stats.sampling_rate,
                          paz_remove=tr.stats.paz, remove_sensitivity=False,
                          pre_filt=(fl1, fl2, fl3, fl4))

        data = np.loadtxt(testsacf)

        # import matplotlib.pyplot as plt
        # plt.plot(tr.data)
        # plt.plot(data)
        # plt.show()
        rms = np.sqrt(np.sum((tr.data - data) ** 2) / \
                      np.sum(tr.data ** 2))
        self.assertTrue(rms < 0.0421)
Example #8
 def test_integrate(self):
     """
     Test integration method of trace
     """
     data = np.ones(101) * 0.01
     tr = Trace(data=data)
     tr.stats.delta = 0.1
     tr.integrate(type='cumtrapz')
     np.testing.assert_almost_equal(tr.data[-1], 0.1)
Example #9
 def test_taper(self):
     """
     Test taper method of trace
     """
     data = np.ones(10)
     tr = Trace(data=data)
     tr.taper()
     for i in range(len(data)):
         self.assertLessEqual(tr.data[i], 1.)
         self.assertGreaterEqual(tr.data[i], 0.)
Example #10
 def test_differentiate(self):
     """
     Test differentiation method of trace
     """
     t = np.linspace(0., 1., 11)
     data = 0.1 * t + 1.
     tr = Trace(data=data)
     tr.stats.delta = 0.1
     tr.differentiate(type='gradient')
     np.testing.assert_array_almost_equal(tr.data, np.ones(11) * 0.1)
Example #11
 def test_taper(self):
     """
     Test taper method of trace
     """
     data = np.ones(10)
     tr = Trace(data=data)
     tr.taper()
     for i in range(len(data)):
         self.assertTrue(tr.data[i] <= 1.)
         self.assertTrue(tr.data[i] >= 0.)
Example #12
def semblance(st, s, baz, winlen):
    """
    Returns the semblance for a seismic array, for a beam of given slowness and backazimuth.

    Parameters
    ----------
    st : ObsPy Stream object
        Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
    s  : float
        Magnitude of slowness vector, in s / km
    baz : float
        Backazimuth of slowness vector, (i.e. angle from North back to epicentre of event)
    winlen : int
        Length of Hann window over which to calculate the semblance.

    Returns
    -------
    semblance : NumPy array
        The semblance at the given slowness and backazimuth, as a time series.

    """

    # Check that each channel has the same number of samples, otherwise we can't construct the beam properly
    assert len(set([len(tr) for tr in st])) == 1, "Traces in stream have different lengths, cannot stack."

    nsta = len(st)

    stack = linear_stack(st, s, baz)

    # Taper the linear stack
    stack_trace = Trace(stack)
    stack_trace.taper(type="cosine", max_percentage=0.05)
    stack = stack_trace.data

    shifts = get_shifts(st, s, baz)

    # Smooth data with sliding Hann window (i.e. convolution of signal and window function)
    window = np.hanning(winlen)

    shifted_st = st.copy()

    for i, tr in enumerate(shifted_st):
        tr.data = np.roll(tr.data, shifts[i])  # Shift data in each trace by its offset
        tr.taper(type="cosine", max_percentage=0.05)  # Taper it

    # Calculate the power in the beam
    beampower = np.convolve(stack ** 2, window, mode="same")

    # Calculate the summed power of each trace
    tracepower = np.convolve(np.sum([tr.data ** 2 for tr in shifted_st], axis=0), window, mode="same")

    # Calculate semblance
    semblance = nsta * beampower / tracepower

    return semblance
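
A hedged usage sketch for semblance: it assumes equal-length SAC traces for the array are on disk and that the module's own helpers (linear_stack, get_shifts) are importable; the path, slowness, backazimuth, and window length are placeholders:

from obspy import read

st = read("array_data/*.SAC")                 # hypothetical SAC files for the array
semb = semblance(st, s=0.06, baz=135.0, winlen=200)
print(semb.max(), semb.argmax())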
Example #13
File: test_core.py Project: egdorf/obspy
 def test_writeSACXYWithMinimumStats(self):
     """
      Write SACXY with minimal stats header, not inherited from a SAC file
     """
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, "SACXY")
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEquals(st[0].stats.delta, 0.01)
     self.assertEquals(st[0].stats.sampling_rate, 100.0)
Example #14
 def test_slice(self):
     """
     Tests the slicing of trace objects.
     """
     tr = Trace(data=np.arange(10, dtype='int32'))
     mempos = tr.data.ctypes.data
     t = tr.stats.starttime
     tr1 = tr.slice(t + 2, t + 8)
     tr1.data[0] = 10
     self.assertEqual(tr.data[2], 10)
     self.assertEqual(tr.data.ctypes.data, mempos)
     self.assertEqual(tr.data[2:9].ctypes.data, tr1.data.ctypes.data)
     self.assertEqual(tr1.data.ctypes.data - 8, mempos)
Example #15
 def test_writeSmallTrace(self):
     """
     Tests writing Traces containing 0, 1 or 2 samples only.
     """
     for format in ['SLIST', 'TSPAIR']:
         for num in range(0, 4):
             tr = Trace(data=np.arange(num))
             tempfile = NamedTemporaryFile().name
             tr.write(tempfile, format=format)
             # test results
             st = read(tempfile, format=format)
             self.assertEquals(len(st), 1)
             self.assertEquals(len(st[0]), num)
             os.remove(tempfile)
Example #16
    def test_trimAllDoesNotChangeDtype(self):
        """
        If a Trace is completely trimmed, e.g. no data samples are remaining,
        the dtype should remain unchanged.

        A trace with no data samples is not really meaningful, but the dtype
        should not be changed anyway.
        """
        # Choose non native dtype.
        tr = Trace(np.arange(100, dtype='int16'))
        tr.trim(UTCDateTime(10000), UTCDateTime(20000))
        # Assert the result.
        self.assertEqual(len(tr.data), 0)
        self.assertEqual(tr.data.dtype, 'int16')
Example #17
 def test_times(self):
     """
     Test if the correct times array is returned for normal traces and
     traces with gaps.
     """
     tr = Trace(data=np.ones(100))
     tr.stats.sampling_rate = 20
     start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
     tr.stats.starttime = start
     tm = tr.times()
     self.assertAlmostEquals(tm[-1], tr.stats.endtime - tr.stats.starttime)
     tr.data = np.ma.ones(100)
     tr.data[30:40] = np.ma.masked
     tm = tr.times()
     self.assertTrue(np.alltrue(tr.data.mask == tm.mask))
Example #18
 def test_issue317(self):
     """
     Tests times after breaking a stream into parts and merging it again.
     """
     # create a sample trace
     org_trace = Trace(data=np.arange(22487))
     org_trace.stats.starttime = UTCDateTime()
     org_trace.stats.sampling_rate = 0.999998927116
     num_pakets = 10
     # break org_trace into set of contiguous packet data
     traces = []
     packet_length = int(np.size(org_trace.data) / num_pakets)
     delta_time = org_trace.stats.delta
     tstart = org_trace.stats.starttime
     tend = tstart + delta_time * float(packet_length - 1)
     for i in range(num_pakets):
         tr = Trace(org_trace.data, org_trace.stats)
         tr = tr.slice(tstart, tend)
         traces.append(tr)
         tstart = tr.stats.endtime + delta_time
         tend = tstart + delta_time * float(packet_length - 1)
     # reconstruct original trace by adding together packet traces
     sum_trace = traces[0].copy()
     npts = traces[0].stats.npts
     for i in range(1, len(traces)):
         sum_trace = sum_trace.__add__(traces[i].copy(), method=0,
                                       interpolation_samples=0,
                                       fill_value='latest',
                                       sanity_checks=True)
         # check npts
         self.assertEquals(traces[i].stats.npts, npts)
         self.assertEquals(sum_trace.stats.npts, (i + 1) * npts)
         # check data
         np.testing.assert_array_equal(traces[i].data,
                                       np.arange(i * npts, (i + 1) * npts))
         np.testing.assert_array_equal(sum_trace.data,
                                       np.arange(0, (i + 1) * npts))
         # check delta
         self.assertEquals(traces[i].stats.delta, org_trace.stats.delta)
         self.assertEquals(sum_trace.stats.delta, org_trace.stats.delta)
         # check sampling rates
         self.assertAlmostEquals(traces[i].stats.sampling_rate,
                                 org_trace.stats.sampling_rate)
         self.assertAlmostEquals(sum_trace.stats.sampling_rate,
                                 org_trace.stats.sampling_rate)
         # check endtimes
         self.assertEquals(traces[i].stats.endtime, sum_trace.stats.endtime)
Example #19
def time_difference(isource, j):
	"""Compute the time difference between data and synthetics

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

	namedir1 = 'Source_' + str(isource + 1)
	os.chdir(namedir1)

	filename_d = 'OUTPUT_FILES/data_process.su'
	filename_s = 'OUTPUT_FILES/synthetics_process.su'
	filename_i = 'OUTPUT_FILES/Up_file_single.su'
	stream_d = read(filename_d, format='SU', byteorder='<')
	stream_s = read(filename_s, format='SU', byteorder='<')
	stream_i = read(filename_i, format='SU')

	misfit = 0.0
	stream_adj = Stream()
	for irec in range(0, nrec):
		adj = numpy.zeros(nt_s)
		trace_i = stream_i[irec].copy()
		if irec >= rstart - 1 and irec <= rend - 1:
			trace_d = stream_d[irec].copy()
			trace_s = stream_s[irec].copy()
			if trace_d.data.size != trace_s.data.size:
				raise ValueError("Data and synthetic signals should have the same length")
			nstep = trace_s.data.size
			adj_temp = numpy.zeros(nt_ref)
			starttime = tstart[j - 1] + irec * 25.0 * sstart[j - 1]
			istart = int(starttime / dt_ref)
			for it in range(0, nstep):
				misfit += 0.5 * numpy.power(f * trace_s.data[it] - trace_d.data[it], 2.0)
				adj_temp[istart + it] = f * trace_s.data[it] - trace_d.data[it]
			trace_adj = Trace(data=adj_temp, header=trace_s.stats)
			trace_adj.interpolate(sampling_rate=1.0 / dt_s, starttime=trace_adj.stats.starttime, npts=nt_s)
		else:
			trace_adj = Trace(data=adj, header=trace_i.stats)
		trace_adj.data = numpy.require(trace_adj.data, dtype=numpy.float32)
		stream_adj.append(trace_adj)
	stream_adj.write('SEM/Up_file_single.su.adj', format='SU')
	os.chdir('..')

	return misfit
Example #20
def export_mseed(db, filename, pair, components, filterid,corr,ncorr=0):
    try:
        os.makedirs(os.path.split(filename)[0])
    except:
        pass
    filename += ".MSEED"
    maxlag = float(get_config(db,"maxlag"))
    cc_sampling_rate = float(get_config(db,"cc_sampling_rate"))
    
    mytrace = Trace(data=corr)
    mytrace.stats['station'] = pair[:11]
    mytrace.stats['sampling_rate'] = cc_sampling_rate
    mytrace.stats['start_time'] = -maxlag
    mytrace.stats['location'] = "%02i" % ncorr

    st = Stream(traces = [mytrace,])            
    st.write(filename,format='MSEED')
    del st
    return
Example #21
def y2m(file, path):
    """
    yspec outputs to SAC format
    """
    stationID = int(file.split('.')[-1])
    chans = ['BHZ', 'BHN', 'BHE']
    dat = np.loadtxt(file)
    npts = len(dat[:,0])
    for i, chan in enumerate(chans):
        stats = {'network': 'SG',
                 'station': 'RS%02d' % stationID,
                 'location': '',
                 'channel': chan,
                 'npts': npts,
                 'sampling_rate': (npts - 1.)/(dat[-1,0] - dat[0,0]),
                 'starttime': t,
                 'mseed': {'dataquality': 'D'}}
        traces = Trace(data=dat[:,1+i], header=stats)
        traces.write(os.path.join(path, 'SAC', 'dis.%s.%s.%s' % (traces.stats.station, traces.stats.location,
                                                                 traces.stats.channel)), format='SAC')
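
The pattern in Example #21, building a Trace from a two-column array with the sampling rate derived from the time column, also works with synthetic data; a minimal sketch in which the header values are placeholders:

import numpy as np
from obspy import Trace, UTCDateTime

t = np.linspace(0.0, 10.0, 1001)                     # time column
u = np.sin(2.0 * np.pi * 0.5 * t)                    # displacement column
stats = {'network': 'SG', 'station': 'RS01', 'channel': 'BHZ',
         'sampling_rate': (len(t) - 1.0) / (t[-1] - t[0]),
         'starttime': UTCDateTime(0)}
tr = Trace(data=u, header=stats)
print(tr)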
Example #22
File: test_core.py Project: egdorf/obspy
 def test_writeSmallTrace(self):
     """
     Tests writing Traces containing 0, 1 or 2 samples only.
     """
     for format in ['SH_ASC', 'Q']:
         for num in range(0, 4):
             tr = Trace(data=np.arange(num))
             tempfile = NamedTemporaryFile().name
             if format == 'Q':
                 tempfile += '.QHD'
             tr.write(tempfile, format=format)
             # test results
             st = read(tempfile, format=format)
             self.assertEquals(len(st), 1)
             self.assertEquals(len(st[0]), num)
             # Q files consist of two files - deleting additional file
             if format == 'Q':
                 os.remove(tempfile[:-4] + '.QBN')
                 os.remove(tempfile[:-4])
             os.remove(tempfile)
Example #23
 def test_verify(self):
     """
     Tests verify method.
     """
     # empty Trace
     tr = Trace()
     tr.verify()
     # Trace with a single sample (issue #357)
     tr = Trace(data=np.array([1]))
     tr.verify()
     # example Trace
     tr = read()[0]
     tr.verify()
Example #24
    def _createStream(self, starttime, endtime, sampling_rate):
        """
        Helper method to create a Stream object that can be used for testing
        waveform plotting.

        Takes the time frame of the Stream to be created and a sampling rate.
        Any other header information will have to be adjusted on a case by case
        basis. Please remember to use the same sampling rate for one Trace as
        merging and plotting will not work otherwise.

        This method will create, to a first approximation, a single sine curve
        with 10 smaller sine curves superimposed on it.

        :return: Stream object
        """
        time_delta = endtime - starttime
        number_of_samples = time_delta * sampling_rate + 1
        # Calculate first sine wave.
        curve = np.linspace(0, 2 * np.pi, int(number_of_samples // 2))
        # Superimpose it with a smaller but shorter wavelength sine wave.
        curve = np.sin(curve) + 0.2 * np.sin(10 * curve)
        # To get a thick curve alternate between two curves.
        data = np.empty(number_of_samples)
        # Check if even number and adjust if necessary.
        if number_of_samples % 2 == 0:
            data[0::2] = curve
            data[1::2] = curve + 0.2
        else:
            data[-1] = 0.0
            data[0:-1][0::2] = curve
            data[0:-1][1::2] = curve + 0.2
        tr = Trace()
        tr.stats.starttime = starttime
        tr.stats.sampling_rate = float(sampling_rate)
        # Fill dummy header.
        tr.stats.network = 'BW'
        tr.stats.station = 'OBSPY'
        tr.stats.channel = 'TEST'
        tr.data = data
        return Stream(traces=[tr])
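
The thick-curve trick used by _createStream, alternating two vertically offset copies of the same sine curve, can be sketched standalone; the sample count and sampling rate below are arbitrary:

import numpy as np
from obspy import Trace

n = 1000                                        # even number of samples (placeholder)
phase = np.linspace(0, 2 * np.pi, n // 2)
curve = np.sin(phase) + 0.2 * np.sin(10 * phase)
data = np.empty(n)
data[0::2] = curve                              # even samples: the base curve
data[1::2] = curve + 0.2                        # odd samples: the offset copy
tr = Trace(data=data)
tr.stats.sampling_rate = 10.0
print(tr.stats.npts, tr.stats.endtime)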
Example #25
 def test_mergePreviews2(self):
     """
     Test case for issue #84.
     """
     # Note: explicitly creating np.ones instead of np.empty in order to
     # prevent NumPy warnings related to max function
     tr1 = Trace(data=np.ones(2880))
     tr1.stats.starttime = UTCDateTime("2010-01-01T00:00:00.670000Z")
     tr1.stats.delta = 30.0
     tr1.stats.preview = True
     tr1.verify()
     tr2 = Trace(data=np.ones(2881))
     tr2.stats.starttime = UTCDateTime("2010-01-01T23:59:30.670000Z")
     tr2.stats.delta = 30.0
     tr2.stats.preview = True
     tr2.verify()
     st1 = Stream([tr1, tr2])
     st1.verify()
     # merge
     st2 = mergePreviews(st1)
     st2.verify()
     # check
     self.assertTrue(st2[0].stats.preview)
     self.assertEqual(st2[0].stats.starttime, tr1.stats.starttime)
     self.assertEqual(st2[0].stats.endtime, tr2.stats.endtime)
     self.assertEqual(st2[0].stats.npts, 5760)
     self.assertEqual(len(st2[0]), 5760)
Example #26
 def test_addTraceWithGap(self):
     """
     Tests __add__ method of the Trace class.
     """
     # set up
     tr1 = Trace(data=np.arange(1000))
     tr1.stats.sampling_rate = 200
     start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
     tr1.stats.starttime = start
     tr2 = Trace(data=np.arange(0, 1000)[::-1])
     tr2.stats.sampling_rate = 200
     tr2.stats.starttime = start + 10
     # verify
     tr1.verify()
     tr2.verify()
     # add
     trace = tr1 + tr2
     # stats
     self.assertEquals(trace.stats.starttime, start)
     self.assertEquals(trace.stats.endtime, start + 14.995)
     self.assertEquals(trace.stats.sampling_rate, 200)
     self.assertEquals(trace.stats.npts, 3000)
     # data
     self.assertEquals(len(trace), 3000)
     self.assertEquals(trace[0], 0)
     self.assertEquals(trace[999], 999)
     self.assertTrue(is_masked(trace[1000]))
     self.assertTrue(is_masked(trace[1999]))
     self.assertEquals(trace[2000], 999)
     self.assertEquals(trace[2999], 0)
     # verify
     trace.verify()
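
The gap behavior tested above can be reproduced in a few lines; a minimal sketch in which the gap length and sampling rate are arbitrary:

import numpy as np
from obspy import Trace

tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
tr2 = Trace(data=np.arange(1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = tr1.stats.starttime + 10
gapped = tr1 + tr2
print(np.ma.is_masked(gapped.data[1500]))       # True: samples inside the gap are masked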
Example #27
File: api.py Project: RDePlaen/MSNoise
def export_mseed(db, filename, pair, components, filterid, corr, ncorr=0, maxlag=None, cc_sampling_rate=None):
    try:
        os.makedirs(os.path.split(filename)[0])
    except:
        pass
    filename += ".MSEED"

    if maxlag is None:
        maxlag = float(get_config(db, "maxlag"))
    if cc_sampling_rate is None:
        cc_sampling_rate = float(get_config(db, "cc_sampling_rate"))

    mytrace = Trace(data=corr)
    mytrace.stats["station"] = pair[:11]
    mytrace.stats["sampling_rate"] = cc_sampling_rate
    mytrace.stats["start_time"] = -maxlag
    mytrace.stats["location"] = "%02i" % ncorr

    st = Stream(traces=[mytrace])
    st.write(filename, format="MSEED")
    del st
    return
Example #28
def compute_DWT(isource, j):
	"""Read the results of the simulation in SU file
	and compute the wavelet transform

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

	namedir1 = 'Source_' + str(isource + 1)
	os.chdir(namedir1)

	filename_d = '../../Data/data_shot' + str(isource + 1) + '.su'
	filename_s = 'OUTPUT_FILES/Up_file_single.su'
	stream_d = read(filename_d, format='SU')
	stream_s = read(filename_s, format='SU')

	stream_d_DWT = Stream()
	stream_s_DWT = Stream()
	for irec in range(0, nrec):
		trace_d = stream_d[irec].copy()
		trace_s = stream_s[irec].copy()
		# Interpolation: We need the same sampling rate to carry out the DWT
		trace_d.interpolate(sampling_rate=1.0 / dt_ref, starttime=trace_d.stats.starttime, npts=nt_ref)
		trace_s.interpolate(sampling_rate=1.0 / dt_ref, starttime=trace_s.stats.starttime, npts=nt_ref)
		# Discrete Wavelet Transform
		data = trace_d.data
		synthetics = trace_s.data
		(data_DWT, NA_d) = WT(data, nt_ref, j)
		(synthetics_DWT, NA_s) = WT(synthetics, nt_ref, j)
		trace_d_DWT = Trace(data=data_DWT, header=trace_d.stats)
		trace_s_DWT = Trace(data=synthetics_DWT, header=trace_s.stats)
		trace_d_DWT.data = numpy.require(trace_d_DWT.data, dtype=numpy.float32)
		trace_s_DWT.data = numpy.require(trace_s_DWT.data, dtype=numpy.float32)
		stream_d_DWT.append(trace_d_DWT)
		stream_s_DWT.append(trace_s_DWT)
	stream_d_DWT.write('OUTPUT_FILES/data_DWT.su', format='SU', byteorder='<')
	stream_s_DWT.write('OUTPUT_FILES/synthetics_DWT.su', format='SU', byteorder='<')
	os.chdir('..')
Example #29
def add_corr(db,station1, station2, filterid, date, time, duration, components, CF, sampling_rate, day=False, ncorr=0):
    output_folder = get_config(db, 'output_folder')
    export_format = get_config(db, 'export_format')
    if export_format == "BOTH":
        mseed = True
        sac = True
    elif export_format == "SAC":
        mseed = False
        sac = True
    elif export_format == "MSEED":
        mseed = True
        sac = False

    if day:
        path = os.path.join("STACKS", "%02i" % filterid, "001_DAYS", components, "%s_%s" % (station1, station2), str(date))
        pair = "%s:%s" % (station1, station2)
        if mseed:
            export_mseed(db, path, pair, components, filterid, CF/ncorr, ncorr)
        if sac:
            export_sac(db, path, pair, components, filterid, CF/ncorr, ncorr)

    else:
        file = '%s.cc' % time
        path = os.path.join(output_folder, "%02i" % filterid, station1, station2, components, date)
        if not os.path.isdir(path):
            os.makedirs(path)

        t = Trace()
        t.data = CF
        t.stats.sampling_rate = sampling_rate
        t.stats.starttime = -float(get_config(db, 'maxlag'))
        t.stats.components = components
        # if ncorr != 0:
            # t.stats.location = "%02i"%ncorr
        st = Stream(traces=[t, ])
        st.write(os.path.join(path, file), format='mseed')
        del t, st
Example #30
 def test_slice_noStarttimeOrEndtime(self):
     """
     Tests the slicing of trace objects with no starttime or endtime
     provided. Compares results against the equivalent trim() operation
     """
     tr_orig = Trace(data=np.arange(10, dtype='int32'))
     tr = tr_orig.copy()
     # two time points outside the trace and two inside
     t1 = tr.stats.starttime - 2
     t2 = tr.stats.starttime + 2
     t3 = tr.stats.endtime - 3
     t4 = tr.stats.endtime + 2
     # test 1: only removing data at left side
     tr_trim = tr_orig.copy()
     tr_trim.trim(starttime=t2)
     self.assertEqual(tr_trim, tr.slice(starttime=t2))
     self.assertEqual(tr_trim, tr.slice(starttime=t2, endtime=t4))
     # test 2: only removing data at right side
     tr_trim = tr_orig.copy()
     tr_trim.trim(endtime=t3)
     self.assertEqual(tr_trim, tr.slice(endtime=t3))
     self.assertEqual(tr_trim, tr.slice(starttime=t1, endtime=t3))
     # test 3: not removing data at all
     tr_trim = tr_orig.copy()
     tr_trim.trim(starttime=t1, endtime=t4)
     self.assertEqual(tr_trim, tr.slice())
     self.assertEqual(tr_trim, tr.slice(starttime=t1))
     self.assertEqual(tr_trim, tr.slice(endtime=t4))
     self.assertEqual(tr_trim, tr.slice(starttime=t1, endtime=t4))
     tr_trim.trim()
     self.assertEqual(tr_trim, tr.slice())
     self.assertEqual(tr_trim, tr.slice(starttime=t1))
     self.assertEqual(tr_trim, tr.slice(endtime=t4))
     self.assertEqual(tr_trim, tr.slice(starttime=t1, endtime=t4))
     # test 4: removing data at left and right side
     tr_trim = tr_orig.copy()
     tr_trim.trim(starttime=t2, endtime=t3)
     self.assertEqual(tr_trim, tr.slice(t2, t3))
     self.assertEqual(tr_trim, tr.slice(starttime=t2, endtime=t3))
     # test 5: no data left after operation
     tr_trim = tr_orig.copy()
     tr_trim.trim(starttime=t4)
     self.assertEqual(tr_trim, tr.slice(starttime=t4))
     self.assertEqual(tr_trim, tr.slice(starttime=t4, endtime=t4 + 1))
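
The equivalence tested above relies on one difference worth remembering: slice() returns a new Trace whose data is a view into the original array, while trim() modifies the Trace in place. A small sketch:

import numpy as np
from obspy import Trace

tr = Trace(data=np.arange(10, dtype='int32'))
t = tr.stats.starttime
piece = tr.slice(t + 2, t + 8)        # new Trace, data shares memory with tr.data
piece.data[0] = 99
print(tr.data[2])                     # 99: the original array sees the change
tr.trim(starttime=t + 2)              # trim changes tr itself
print(tr.stats.npts)                  # 8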
n = len(evento_sin_medio)
base = datetime.datetime(2020, 3, 4, 17, 20, 10)  # year, month, day
dates = [base + datetime.timedelta(seconds=(dt * i)) for i in range(n)]
N = len(dates)
np.random.seed(19680801)
y = np.cumsum(np.random.randn(N))
#lims = (np.datetime64('2020-03-04 17:20:10'), np.datetime64('2020-03-04 17:21:10'))
lims = (np.datetime64(inicio), np.datetime64(fin))
plt.plot(dates, evento_sin_medio, color='black')
plt.xlim(lims)
titulo = str(inicio) + '-' + str(fin)
plt.title(titulo)
plt.show()

# Add the values to a Trace-type variable
st_edit = Stream(Trace())
st_edit.append(Trace(data=evento_sin_medio_copy))
tr_edit = st_edit[1]

#%%
#-----------------------------------------------------------------------------
# 5. Apply a high-pass (HP) filter
#-----------------------------------------------------------------------------
tr_edit_hp = tr_edit.copy()  # make a copy so the original event is not modified

# Define the digital angular frequency for the filter (dig. ang. freq = freq[Hz]/fm)
dt = tr.meta.delta
fm = 1 / dt  # sampling frequency
w_hp = 0.1 / fm
tr_edit_hp = tr_edit_hp.filter("highpass",
def new_st(data):
    header = evento.stats
    st_nueva = Stream(Trace())
    st_nueva.append(Trace(data=data, header=header))
    return st_nueva
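
Relating to step 5 above, a complete high-pass call on a Trace copy for reference; the corner frequency is a placeholder, and obspy's highpass filter expects it in Hz rather than as a normalized frequency:

import numpy as np
from obspy import Trace

tr_demo = Trace(data=np.random.randn(2000))
tr_demo.stats.sampling_rate = 100.0          # placeholder sampling rate
tr_demo_hp = tr_demo.copy()                  # keep the original untouched
tr_demo_hp.filter("highpass", freq=0.1, corners=4, zerophase=True)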
Example #33
from GetANIR import *
import time
from obspy.core import Trace, Stream, AttribDict
jt = time.time()
sta1 = 'CI.PSD'
sta2 = 'CI.SNCC'
comp1 = 'BHZ'
comp2 = 'BHZ'
ANIR = MyGetANIR(sta1,sta2,comp1,comp2)
print ">>> Job Finished. It took %.2f seconds" %(time.time()-jt)
plt.plot(ANIR)
plt.show()

pair = "%s:%s" % (sta1,sta2)
out_path = '/Users/Hugh/Documents/C1_results/'+sta1+'/'
filename = out_path + sta1.split('.')[1] + '_' + sta2.split('.')[1]+'.SAC'
try:
	os.makedirs(os.path.split(filename)[0])
except:
	pass

mytrace = Trace(data=ANIR)
mytrace.stats['station'] = pair
mytrace.stats.sac = AttribDict()
mytrace.stats.sac.npts = len(ANIR)

st = Stream(traces=[mytrace,])
st.write(filename,format ='SAC')
del st
Example #34
    def save_wave(self):

        # Fetch a wave from Ring 0
        wave = self.ring2buff.get_wave(0)

        # if wave is empty return
        if wave == {}:
            return

        # Let's try to buffer with Python dictionaries and obspy
        name = wave["station"] + '.' + wave["channel"] + '.' + wave[
            "network"] + '.' + wave["location"]

        if name in self.wave_buffer:

            # Determine max samples for buffer
            max_samp = wave["samprate"] * 60 * self.minutes

            # Create a header:
            wavestats = Stats()
            wavestats.station = wave["station"]
            wavestats.network = wave["network"]
            wavestats.channel = wave["channel"]
            wavestats.location = wave["location"]
            wavestats.sampling_rate = wave["samprate"]
            wavestats.starttime = UTCDateTime(wave['startt'])

            # Create a trace
            wavetrace = Trace(header=wavestats)
            wavetrace.data = wave["data"]

            # Try to append data to buffer, if gap shutdown.
            try:
                self.wave_buffer[name].append(wavetrace,
                                              gap_overlap_check=True)
            except TypeError as err:
                logger.warning(err)
                self.runs = False
            except:
                raise
                self.runs = False

            # Debug data
            if self.debug:
                logger.info("Station Channel combo is in buffer:")
                logger.info(name)
                logger.info("Size:")
                logger.info(self.wave_buffer[name].count())
                logger.debug("Data:")
                logger.debug(self.wave_buffer[name])

        else:
            # First instance of data in buffer, create a header:
            wavestats = Stats()
            wavestats.station = wave["station"]
            wavestats.network = wave["network"]
            wavestats.channel = wave["channel"]
            wavestats.location = wave["location"]
            wavestats.sampling_rate = wave["samprate"]
            wavestats.starttime = UTCDateTime(wave['startt'])

            # Create a trace
            wavetrace = Trace(header=wavestats)
            wavetrace.data = wave["data"]

            # Create a RTTrace
            rttrace = RtTrace(int(self.minutes * 60))
            self.wave_buffer[name] = rttrace

            # Append data
            self.wave_buffer[name].append(wavetrace, gap_overlap_check=True)

            # Debug data
            if self.debug:
                logger.info("First instance of station/channel:")
                logger.info(name)
                logger.info("Size:")
                logger.info(self.wave_buffer[name].count())
                logger.debug("Data:")
                logger.debug(self.wave_buffer[name])
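
A hedged sketch of the RtTrace buffering used in save_wave, outside the Earthworm ring context; the buffer length, header values, and data below are placeholders:

import numpy as np
from obspy import Trace, UTCDateTime
from obspy.realtime import RtTrace

rt = RtTrace(max_length=120)                     # keep at most 120 s of data
packet = Trace(data=np.random.randn(100).astype(np.float32))
packet.stats.sampling_rate = 100.0
packet.stats.starttime = UTCDateTime(2020, 1, 1)
rt.append(packet, gap_overlap_check=True)
print(rt.count())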
Example #35
def conv_traces(tr1, tr2, normal=True):
    """ Convolve two Traces and merge their meta-information

    It convolves the data stored in two :class:`~obspy.core.trace.Trace`
    Objects in frequency domain. If ``normal==True`` the resulting correlation
    data are normalized by a factor of
    :func:`sqrt(||tr1.data||^2 x ||tr2.data||^2)`

    The meta-information associated with the resulting Trace is obtained by:

        - Merging the original meta-information of the two input traces
          according to the :func:`~miic.core.corr_fun.combine_stats` function.

        - Adding the original two `Stats` objects to the newly
          created :class:`~obspy.core.trace.Trace` object as:
          >>> conv_tr.stats_tr1 = tr1.stats
          >>> conv_tr.stats_tr2 = tr2.stats
        - Fixing:
          >>> conv_tr.stats['npts'] = '...number of correlation points...'
          >>> conv_tr.stats['starttime'] = tr2.stats['starttime'] -
              tr1.stats['starttime']

    :type tr1: :class:`~obspy.core.trace.Trace`
    :param tr1: First Trace
    :type tr2: :class:`~obspy.core.trace.Trace`
    :param tr2: Second Trace
    :type normal: bool
    :param normal: Normalization flag

    :rtype: :class:`~obspy.core.trace.Trace`
    :return: **conv_tr**: Trace that stores convolved data and meta-information

    """

    if not isinstance(tr1, Trace):
        raise TypeError("tr1 must be an obspy Trace object.")

    if not isinstance(tr2, Trace):
        raise TypeError("tr2 must be an obspy Trace object.")
    
    zerotime = UTCDateTime(1971, 1, 1, 0, 0, 0)
    conv_tr = Trace()

    # extend traces to the next power of 2 of the longest trace
    lt = pow(2, np.ceil(np.log2(np.max([tr1.stats['npts'],
             tr2.stats['npts']]))))
    s1 = extend(tr1.data, method='zeros', length='fixed',size=lt)
    s2 = extend(tr2.data, method='zeros', length='fixed',size=lt)

    # create the combined stats
    conv_tr.stats = combine_stats(tr1, tr2)
    conv_tr.stats_tr1 = tr1.stats
    conv_tr.stats_tr2 = tr2.stats

    conv_tr.stats_tr1.npts = min(tr1.stats.npts, tr2.stats.npts)
    conv_tr.stats_tr2.npts = min(tr1.stats.npts, tr2.stats.npts)

    if normal:
        denom = np.sqrt(np.dot(s1.astype(np.float64), s1.T) *
                        np.dot(s2.astype(np.float64), s2.T))
    else:
        denom = 1.

    # remaining offset in samples (just remove fractions of samples)
    roffset = np.round((tr2.stats.starttime - tr1.stats.starttime) *
                        tr1.stats.sampling_rate)
    offset = (tr2.stats.starttime - tr1.stats.starttime) * \
        tr1.stats.sampling_rate - roffset
    # remaining offset in seconds
    roffset /= tr1.stats.sampling_rate


    convData = _fftconvolve(s1[::-1], s2, offset)
    convData = np.multiply(convData, (1 / denom))
    
    # set number of samples
    conv_tr.stats['npts'] = convData.shape[0]

    # time lag of the zero position, i.e. lag time of alignment
    t_offset_zeroleg = (float(convData.shape[0]) - 1.) / \
        (2. * tr1.stats.sampling_rate)

    # set starttime
    conv_tr.stats['starttime'] = zerotime - t_offset_zeroleg + \
        roffset

    conv_tr.data = convData

    return conv_tr
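
The normalization used in conv_traces can be illustrated with a plain NumPy frequency-domain convolution standing in for the module's extend and _fftconvolve helpers; the array lengths here are placeholders:

import numpy as np

s1 = np.random.randn(256)
s2 = np.random.randn(256)
nfft = 2 * s1.size
denom = np.sqrt(np.dot(s1, s1) * np.dot(s2, s2))   # sqrt(||s1||^2 * ||s2||^2)
conv = np.fft.irfft(np.fft.rfft(s1[::-1], n=nfft) * np.fft.rfft(s2, n=nfft), n=nfft)
conv /= denom
print(conv.shape, np.abs(conv).max())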
Example #36
    def dcomp_find_azim(self, xmin=None, xmax=None):
        """
        Method to decompose radial and transverse receiver function 
        streams into back-azimuth harmonics and determine the main 
        orientation ``azim``, obtained by minimizing the B1 component
        between ``xmin`` and ``xmax`` (i.e., time or depth).

        Parameters
        ----------
        xmin : float
            Minimum x axis value over which to calculate ``azim``
        xmax : float
            Maximum x axis value over which to calculate ``azim``

        Attributes
        ----------
        hstream : :class:`~obspy.core.Stream`
            Stream containing the 5 harmonics, oriented in direction ``azim``
        azim : float
            Direction (azimuth) along which the B1 component of the stream
            is minimized (between ``xmin`` and ``xmax``)
        var : :class:`~numpy.ndarray`
            Variance of the 5 harmonics between ``xmin`` and ``xmax``

        """

        if not xmin:
            xmin = self.xmin
        if not xmax:
            xmax = self.xmax

        print()
        print('Decomposing receiver functions into baz harmonics')

        # Some integers
        nbin = len(self.radialRF)
        nz = len(self.radialRF[0].data)
        naz = 180
        daz = np.float(360 / naz)
        deg2rad = np.pi / 180.

        # Define depth range over which to calculate azimuth
        indmin = int(xmin / self.radialRF[0].stats.delta)
        indmax = int(xmax / self.radialRF[0].stats.delta)

        # Copy stream stats
        str_stats = self.radialRF[0].stats

        # Initialize work arrays
        C0 = np.zeros((nz, naz))
        C1 = np.zeros((nz, naz))
        C2 = np.zeros((nz, naz))
        C3 = np.zeros((nz, naz))
        C4 = np.zeros((nz, naz))

        # Loop over each depth step
        for iz in range(nz):

            # Build matrices OBS and H for each azimuth
            for iaz in range(naz):

                # Initialize work arrays
                OBS = np.zeros(2 * nbin)
                H = np.zeros((2 * nbin, 5))

                azim = iaz * daz

                # Radial component
                for irow, trace in enumerate(self.radialRF):

                    baz = trace.stats.baz
                    OBS[irow] = trace.data[iz]
                    H[irow, 0] = 1.0
                    H[irow, 1] = np.cos(deg2rad * (baz - azim))
                    H[irow, 2] = np.sin(deg2rad * (baz - azim))
                    H[irow, 3] = np.cos(2. * deg2rad * (baz - azim))
                    H[irow, 4] = np.sin(2. * deg2rad * (baz - azim))

                shift = 90.

                # Transverse component
                for irow, trace in enumerate(self.transvRF):

                    baz = trace.stats.baz
                    OBS[irow + nbin] = trace.data[iz]
                    H[irow + nbin, 0] = 0.0
                    H[irow + nbin, 1] = np.cos(deg2rad * (baz + shift - azim))
                    H[irow + nbin, 2] = np.sin(deg2rad * (baz + shift - azim))
                    H[irow + nbin,
                      3] = np.cos(2. * deg2rad * (baz + shift / 2.0 - azim))
                    H[irow + nbin,
                      4] = np.sin(2. * deg2rad * (baz + shift / 2.0 - azim))

                # Solve system of equations with truncated SVD
                u, s, v = np.linalg.svd(H)
                s[s < 0.001] = 0.
                CC = np.linalg.solve(s[:, None] * v, u.T.dot(OBS)[:5])

                # Fill up arrays
                C0[iz, iaz] = np.float(CC[0])
                C1[iz, iaz] = np.float(CC[1])
                C2[iz, iaz] = np.float(CC[2])
                C3[iz, iaz] = np.float(CC[3])
                C4[iz, iaz] = np.float(CC[4])

        # Minimize variance of third component over specific depth range to
        # find azim
        C1var = np.zeros(naz)
        for iaz in range(naz):
            C1var[iaz] = np.sqrt(np.mean(np.square(C1[indmin:indmax, iaz])))
        indaz = np.argmin(C1var)

        C0var = np.sqrt(np.mean(np.square(C0[indmin:indmax, indaz])))
        C1var = np.sqrt(np.mean(np.square(C1[indmin:indmax, indaz])))
        C2var = np.sqrt(np.mean(np.square(C2[indmin:indmax, indaz])))
        C3var = np.sqrt(np.mean(np.square(C3[indmin:indmax, indaz])))
        C4var = np.sqrt(np.mean(np.square(C4[indmin:indmax, indaz])))

        # Put back into traces
        A = Trace(data=C0[:, indaz], header=str_stats)
        B1 = Trace(data=C1[:, indaz], header=str_stats)
        B2 = Trace(data=C2[:, indaz], header=str_stats)
        C1 = Trace(data=C3[:, indaz], header=str_stats)
        C2 = Trace(data=C4[:, indaz], header=str_stats)

        # Put all traces into stream
        self.hstream = Stream(traces=[A, B1, B2, C1, C2])
        self.azim = indaz * daz
        self.var = [C0var, C1var, C2var, C3var, C4var]
Example #37
    result[empty_spots:empty_spots + spots] = diff
    # Merge the separately treated data values.
    if samples_before_start and samples_before_start > result[0]:
        result[0] = samples_before_start
    if samples_after_end and samples_after_end > result[-1]:
        result[-1] = samples_after_end
    if lost_samples and first_spot > result[empty_spots]:
        result[empty_spots] = first_spot
    if free_samples and end_samples > result[empty_spots + spots]:
        result[empty_spots + spots] = end_samples

    # Check for masked values.
    if is_masked(result):
        result.fill_value = 0.0
        result = result.filled()

    # Create new stream.
    tr = Trace(data=result)
    stats = st[0].stats
    stats.starttime = day_starttime
    # Hard code the sampling rate for safe handling of leap seconds and
    # floating point inaccuracies. This will result in 1000 samples with the
    # last sample exactly one delta step away from the beginning of the next
    # day.
    stats.sampling_rate = 0.0115740740741
    stats.npts = 1000
    tr.stats = stats
    out_stream = Stream(traces=[tr])
    # Write the stream.
    out_stream.write(file + '_index.mseed', format='MSEED')
Example #38
data = numpy.zeros([datapoints], dtype=numpy.int16)

starttime = UTCDateTime()
adc.start_adc_difference(0, gain=GAIN, data_rate=sps)

for x in range(datapoints):
    #	sample = adc.read_adc_difference(0, gain=GAIN)
    sample = adc.get_last_result()
    data[x] = sample
    timenow = UTCDateTime()
    #print sample,timenow
adc.stop_adc()

stats = {
    'network': 'TV',
    'station': 'RASPI',
    'location': '00',
    'channel': 'BHZ',
    'npts': datapoints,
    'sampling_rate': sampling,
    'mseed': {
        'dataquality': 'D'
    },
    'starttime': starttime
}

stream = Stream([Trace(data=data, header=stats)])

stream.write('test.mseed', format='MSEED', encoding='INT16', reclen=512)
stream.plot()
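
Reading the file back is a quick way to confirm the encoding and record length survived the write; a small sketch assuming test.mseed was written as above:

from obspy import read

st_check = read("test.mseed")
print(st_check[0].stats.mseed.encoding, st_check[0].stats.mseed.record_length)
print(st_check[0].data.dtype)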
Example #39
    'ORIGINAL_DATA_MEDIATOR_CITATION']
header['dyna']['ORIGINAL_DATA_MEDIATOR'] = headers['ORIGINAL_DATA_MEDIATOR']
header['dyna']['ORIGINAL_DATA_CREATOR_CITATION'] = headers[
    'ORIGINAL_DATA_CREATOR_CITATION']
header['dyna']['ORIGINAL_DATA_CREATOR'] = headers['ORIGINAL_DATA_CREATOR']
header['dyna']['USER1'] = headers['USER1']
header['dyna']['USER2'] = headers['USER2']
header['dyna']['USER3'] = headers['USER3']
header['dyna']['USER4'] = headers['USER4']
header['dyna']['USER5'] = headers['USER5']

# read data
data = np.loadtxt(fh, dtype='float32')
if headers['DATA_TYPE'][-8:] == "SPECTRUM":
    data_1 = np.array([], dtype=np.float32)
    data_2 = np.array([], dtype=np.float32)
    for j in xrange(len(data)):
        for i in xrange(2):
            if i == 0:
                data_1 = np.append(data_1, data[j][i])
            elif i == 1:
                data_2 = np.append(data_2, data[j][i])
    stream.append(Trace(data=data_1, header=header))
    stream.append(Trace(data=data_2, header=header))
else:
    stream.append(Trace(data=data, header=header))

fh.close()

stream.write(filename_out, format=format_out)
Example #40
def spotlme(lat, lon, dep, loading, stime, etime, srate, debug=True):
    if loading:

        tides = ['k1', 'k2', 'm2', 'm4', 'mf', 'mm', 'n2', 'o1', 'p1', 'q1', 's2']

        for idx, tide in enumerate(tides):
            string = '../bin/nloadf TEMP ' + str(lat) + ' ' + str(lon) + ' ' + str(dep) + ' '
            string += tide + '.osu.tpxo72.2010 ' + 'green.gbavap.std l'
            if idx == 0:
                pipe = ' >'
            else: 
                pipe = ' >>'
            cmd = string + ' ' + pipe + ' LoadALL' 
            os.system(cmd)
            if debug:
                print(cmd)
        st = Stream()
        for comp in ['Z', 'N', 'E']:
            if debug:
                print('On component:' + comp)
            
            cmd = 'cat LoadALL | ../bin/harprp '
            if comp == 'Z':
                cmd += 'g '
            elif comp == 'N':
                cmd += 't 0 '
            else:
                cmd += 't 90 '
            cmd += '> tempLoad2'    
            if debug:
                print(cmd)
            os.system(cmd)

            ctime = stime
            #cmd = 'cat tempLoad2 | ../bin/loadcomb t'
            tstring = str(ctime.year) + ' ' + str(ctime.julday) + ' 0 0 0'
            vals = int((etime -stime)/(srate))
            cmd = 'cat tempLoad2 | ../bin/hartid ' + tstring + ' ' + str(vals) + ' ' + str(srate) + ' > temp' + comp
            if debug:
                print(cmd)
            os.system(cmd)

            f=open('temp' + comp,'r')
            data = []
            for line in f:
                if comp == 'Z':
                    data.append(-float(line))
                else:
                    data.append(-float(line))

            f.close()
            data = np.array(data)
            stats = {'network': 'XX', 'station': sta, 'location': '',
                'channel' : 'LH' + comp, 'npts': len(data), 'sampling_rate': 1./srate,
                'mseed' : {'dataquality': 'D'}}
            stats['starttime'] = ctime
            st += Stream([Trace(data=data, header = stats)])
            ctime += 24.*60.*60.
            os.remove('temp' + comp)
            os.remove('tempLoad2')
        oldfiles = glob.glob('tempLoad.*')
        for curfile in oldfiles:
            os.remove(curfile)
        st.merge()
    else:
        # Here we just do the tides with no loading
        pfile = open('para_file','w')
        pfile.write(str(stime.year) + ',' + str(stime.julday) + ',0\n')
        pfile.write(str(etime.year) + ',' + str(etime.julday) + ',0\n')
        # In terms of 1 hour
        pfile.write(str(srate/(60*60)) + '\n')
        pfile.write('t\n')
        pfile.write(str(lat) + '\n')
        pfile.write(str(lon) + '\n')
        # compute gravity
        pfile.write('1\n')
        # Compute two tilt tides
        pfile.write('2\n')
        # compute no strain
        pfile.write('0\n')
        pfile.write('0\n')
        pfile.write('90\n')
        pfile.write('tempZ\n')
        pfile.write('tempN\n')
        pfile.write('tempE\n')
        pfile.close()
        cmd =  '../bin/ertid < para_file'
        os.system(cmd)
        os.remove('para_file')
        st = Stream()
        for comp in ['Z', 'N', 'E']:
            f=open('temp' + comp,'r')
            data = []
            for line in f:
                if comp == 'Z':
                    data.append(-float(line))
                else:
                    data.append(-float(line))

            f.close()
            os.remove('temp' + comp)
            data = np.asarray(data)
            stats = {'network': 'YY', 'station': sta, 'location': '',
                    'channel' : 'UH' + comp, 'npts': len(data), 'sampling_rate': 1./srate,
                    'mseed' : {'dataquality': 'D'}}
            stats['starttime'] = stime 
            st += Stream([Trace(data=data, header = stats)])
    
    return st
Example #41
def tuple2mseed(infile, user_ch1, user_ch2, user_ch3, user_ch4):
    # 1) Make sure user inputs are correct (Convert to real -no symlink- and full path)
    infile = os.path.normcase(infile)
    infile = os.path.normpath(infile)
    infile = os.path.realpath(infile)
    print(infile)

    # 2) If TUPLE, convert to MSEED
    if ".tuple" in infile:

        # 3) Get header and extra info
        arg = "[tuple2mseed] \".tuple\" file %s" % infile
        print(arg)
        resp_dict = TupleParser.get_info(infile)
        tuple_filename_station = resp_dict['station']
        tuple_header_sps = resp_dict['sps']
        tuple_header_starttime = resp_dict['starttime']
        # exit(0)

        # 4) Get samples
        arg = "[tuple2mseed] Creating MSEED files for every channel .."
        print(arg)
        resp_dict = TupleParser.get_and_correct_missing_samples(
            infile_path=infile,
            starttime=tuple_header_starttime,
            sps=tuple_header_sps)
        data_ch4 = resp_dict['ch4']
        data_ch3 = resp_dict['ch3']
        data_ch2 = resp_dict['ch2']
        data_ch1 = resp_dict['ch1']

        # >> Substract DC component
        data = np.int32(data_ch1)
        data = data - np.mean(data)
        data = np.int32(data)
        # >> Convert channel to MSEED
        channel = user_ch1
        # Fill header attributes
        stats = {
            'network': 'UNK',
            'station': tuple_filename_station,
            'location': 'UNK',
            'channel': channel,
            'npts': len(data),
            'sampling_rate': tuple_header_sps,
            'mseed': {
                'dataquality': 'D'
            },
            'starttime': UTCDateTime(str(tuple_header_starttime))
        }
        st = Stream([Trace(data=data, header=stats)])

        # >> Write to disk
        # print(tuple_header_starttime)
        outfile_name = tuple_header_starttime.split(".")
        # print(tuple_header_starttime)
        outfile_name = outfile_name[0]
        outfile_name = outfile_name + "_" + tuple_filename_station + "_" + channel + ".MSEED"
        outfile_name = outfile_name.replace(":", "-")
        st.write(outfile_name,
                 format='MSEED',
                 encoding=11,
                 reclen=256,
                 byteorder='>')
        #st.write(outfile_name, format='MSEED', encoding=0, reclen=256)
        st1 = read(outfile_name)
        arg = "[tuple2mseed] MSEED created: %s" % st1[0]
        print(arg)
        # print(st1[0])
        # print(st1[0].stats)
        # print(st1[0].data)

        # >> Substract DC component
        data = np.int32(data_ch2)
        data = data - np.mean(data)
        data = np.int32(data)
        # >> Convert channel to MSEED
        channel = user_ch2
        # Fill header attributes
        stats = {
            'network': 'UNK',
            'station': tuple_filename_station,
            'location': 'UNK',
            'channel': channel,
            'npts': len(data),
            'sampling_rate': tuple_header_sps,
            'mseed': {
                'dataquality': 'D'
            },
            'starttime': UTCDateTime(str(tuple_header_starttime))
        }
        st = Stream([Trace(data=data, header=stats)])

        # >> Write to disk
        # print(tuple_header_starttime)
        outfile_name = tuple_header_starttime.split(".")
        # print(tuple_header_starttime)
        outfile_name = outfile_name[0]
        outfile_name = outfile_name + "_" + tuple_filename_station + "_" + channel + ".MSEED"
        outfile_name = outfile_name.replace(":", "-")
        st.write(outfile_name,
                 format='MSEED',
                 encoding=11,
                 reclen=256,
                 byteorder='>')
        #st.write(outfile_name, format='MSEED', encoding=0, reclen=256)
        st1 = read(outfile_name)
        arg = "[tuple2mseed] MSEED created: %s" % st1[0]
        print(arg)
        # print(st1[0])
        # print(st1[0].stats)
        # print(st1[0].data)

        # >> Substract DC component
        data = np.int32(data_ch3)
        data = data - np.mean(data)
        data = np.int32(data)
        # >> Convert channel to MSEED
        channel = user_ch3
        # Fill header attributes
        stats = {
            'network': 'UNK',
            'station': tuple_filename_station,
            'location': 'UNK',
            'channel': channel,
            'npts': len(data),
            'sampling_rate': tuple_header_sps,
            'mseed': {
                'dataquality': 'D'
            },
            'starttime': UTCDateTime(str(tuple_header_starttime))
        }
        st = Stream([Trace(data=data, header=stats)])

        # >> Write to disk
        # print(tuple_header_starttime)
        outfile_name = tuple_header_starttime.split(".")
        # print(tuple_header_starttime)
        outfile_name = outfile_name[0]
        outfile_name = outfile_name + "_" + tuple_filename_station + "_" + channel + ".MSEED"
        outfile_name = outfile_name.replace(":", "-")
        st.write(outfile_name,
                 format='MSEED',
                 encoding=11,
                 reclen=256,
                 byteorder='>')
        #st.write(outfile_name, format='MSEED', encoding=0, reclen=256)
        st1 = read(outfile_name)
        arg = "[tuple2mseed] MSEED created: %s" % st1[0]
        print(arg)
        # print(st1[0])
        # print(st1[0].stats)
        # print(st1[0].data)

        # >> Substract DC component
        data = np.int32(data_ch4)
        data = data - np.mean(data)
        data = np.int32(data)
        # >> Convert channel to obspy Stream
        channel = user_ch4
        # Fill header attributes
        stats = {
            'network': 'UNK',
            'station': tuple_filename_station,
            'location': 'UNK',
            'channel': channel,
            'npts': len(data),
            'sampling_rate': tuple_header_sps,
            'mseed': {
                'dataquality': 'D'
            },
            'starttime': UTCDateTime(str(tuple_header_starttime))
        }
        st = Stream([Trace(data=data, header=stats)])

        # >> Write to disk in MSEED format
        # print(tuple_header_starttime)
        outfile_name = tuple_header_starttime.split(".")
        # print(tuple_header_starttime)
        outfile_name = outfile_name[0]
        outfile_name = outfile_name + "_" + tuple_filename_station + "_" + channel + ".MSEED"
        outfile_name = outfile_name.replace(":", "-")
        st.write(outfile_name,
                 format='MSEED',
                 encoding=11,
                 reclen=256,
                 byteorder='>')
        #st.write(outfile_name, format='MSEED', encoding=0, reclen=256)
        st1 = read(outfile_name)
        arg = "[tuple2mseed] MSEED created: %s" % st1[0]
        print(arg)
        # print(st1[0])
        # print(st1[0].stats)
        # print(st1[0].data)

    else:
        arg = "File %s does NOT end with \".tuple\"" % infile[0]
        print(arg)
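
The four per-channel blocks in Example #41 are identical apart from the data array and channel code; a hedged sketch of how that repeated step could be written as a loop (resp_dict and the header values follow the original, and the function name is illustrative):

import numpy as np
from obspy import Stream, Trace, UTCDateTime

def _write_channels(resp_dict, station, sps, starttime, channels):
    """Write one MSEED file per channel; a sketch of the repeated block above."""
    for key, channel in channels:                        # e.g. [('ch1', user_ch1), ...]
        data = np.asarray(resp_dict[key], dtype=np.int32)
        data = (data - np.mean(data)).astype(np.int32)   # subtract DC component
        stats = {'network': 'UNK', 'station': station, 'location': 'UNK',
                 'channel': channel, 'npts': len(data), 'sampling_rate': sps,
                 'mseed': {'dataquality': 'D'},
                 'starttime': UTCDateTime(str(starttime))}
        st = Stream([Trace(data=data, header=stats)])
        name = str(starttime).split(".")[0].replace(":", "-")
        st.write(name + "_" + station + "_" + channel + ".MSEED",
                 format='MSEED', encoding=11, reclen=256, byteorder='>')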
Example #42
def mtinv_constrained(input_set, st_tr, st_g, fmin, fmax, nsv=1, single_force=False,
          stat_subset=[], weighting_type=2, weights=[], cache_path='',
          force_recalc=False, cache=True, constrained_sources=None):
    '''
    Not intended for direct use, use mtinv_gs instead!
    '''
    utrw, weights_l2, S0w, df, dt, nstat, ndat, ng, nfft, nfinv = input_set

    # setup greens matrix in fourier space
    if os.path.isfile(cache_path + 'gw.pickle') and not force_recalc:
        # read G-matrix from file if exists
        gw = pickle.load(open(cache_path + 'gw.pickle'))
        if gw.shape[-1] < nfinv:
            force_recalc = True
        else:
            gw = gw[:,:,:nfinv]

    if not os.path.isfile(cache_path + 'gw.pickle') or force_recalc:
        g = np.zeros((nstat * 3, 6 + single_force * 3, ng))
        #gw = np.zeros((nstat * 3, 6 + single_force * 3, nfft/2+1)) * 0j
        gw = np.zeros((nstat * 3, 6 + single_force * 3, nfinv)) * 0j

        for k in np.arange(nstat):
            for i in np.arange(3):
                for j in np.arange(6 + single_force * 3):
                    g[k*3 + i,j,:] = st_g.select(station='%04d' % (k + 1),
                                     channel='%02d%1d' % (i,j))[0].data
                    # fill greens matrix in freq space, deconvolve S0
                    gw[k*3 + i,j,:] = np.fft.rfft(g[k*3 + i,j,:], n=nfft) \
                                                    [:nfinv] * dt / S0w
                    

        # write G-matrix to file
        if cache:
            pickle.dump(gw, open(cache_path + 'gw.pickle', 'wb'), protocol=2)

    # setup channel subset from station subset
    if stat_subset == []:
        stat_subset = np.arange(nstat)
    else:
        stat_subset = np.array(stat_subset) - 1

    chan_subset = np.zeros(stat_subset.size*3, dtype=int)
    for i in np.arange(stat_subset.size):
        chan_subset[i*3:(i+1)*3] = stat_subset[i]*3 + np.array([0,1,2])

    # setup weighting matrix (depending on weighting scheme and apriori
    # weighting)
    
    # a priori weighting   
    if weights == []:
        weights = np.ones(nstat)
    elif len(weights) == stat_subset.size:
        weights = np.array(weights)
        buf = np.ones(nstat)
        buf[stat_subset] = weights
        weights = buf
    elif len(weights) == nstat:
        weights = np.array(weights)
    else:
        raise ValueError('argument weights has wrong length')
    
    chan_weights = np.zeros(nstat*3)
    for i in np.arange(nstat):
        chan_weights[i*3:(i+1)*3] = weights[i] + np.zeros(3)

    # l2-norm weighting
    if weighting_type == 0:
        weights_l2 *= chan_weights
        weights_l2 = np.ones(nstat*3) * (weights_l2[chan_subset].sum())**.5
    elif weighting_type == 1:
        weights_l2 = weights_l2**.5
    elif weighting_type == 2:
        for k in np.arange(nstat):
            weights_l2[k*3:k*3 + 3] = (weights_l2[k*3:k*3 + 3].sum())**.5
    else:
        raise ValueError('argument weighting_type needs to be in [0,1,2]')
   
    weights_l2 = 1./weights_l2
    weightsm = np.matrix(np.diag(weights_l2[chan_subset] *
                         chan_weights[chan_subset]**.5))
    
   
    mf = np.zeros(constrained_sources.shape[0])
    stfl = []
    stl = []

    for nn, const_source in enumerate(constrained_sources):
        stf = np.zeros(nfft/2+1) * 0j

        # inversion
        for w in np.arange(nfinv):
            GM = weightsm * np.matrix(gw[[chan_subset],:,w]) * np.matrix(const_source).T
            GI = np.linalg.pinv(GM, rcond=0.00001)
            m = GI * weightsm * np.matrix(utrw[[chan_subset],w]).T
            stf[w] = m[0,0]

        # back to time domain
        stf_t = np.fft.irfft(stf)[:nfft] * df
        
        stf_t = lowpass(stf_t, fmax, df, corners=4)

        # compute synthetic seismograms (for stations included in the inversion
        # only - maybe it makes sense to do it for all so that indices in the
        # streams are the same in input and output)

        channels = ['u', 'v', 'w']

        M_t = np.zeros((6, nfft))
        for i in np.arange(6):
            M_t[i] = stf_t * const_source[i]

        traces = []
        stff = np.fft.rfft(M_t, n=nfft) * dt

        for k in stat_subset:
            for i in np.arange(3):
                
                data = np.zeros(ndat)
                for j in np.arange(6 + single_force * 3):
                    dummy = np.concatenate((gw[k*3 + i,j,:], np.zeros(nfft/2 + 1 -
                                            nfinv))) * stff[j]
                    dummy = np.fft.irfft(dummy)[:ndat] / dt
                    data += dummy

                stats = {'network': 'SY', 
                         'station': '%04d' % (k+1), 
                         'location': '',
                         'channel': channels[i],
                         'npts': len(data), 
                         'sampling_rate': st_tr[0].stats.sampling_rate,
                         'starttime': st_tr[0].stats.starttime,
                         'mseed' : {'dataquality': 'D'}}
                traces.append(Trace(data=data, header=stats))

        st_syn = Stream(traces)
        
        # compute misfit
        misfit = 0.

        for k in stat_subset:
            for i, chan in enumerate(['u', 'v', 'w']):
                u = st_tr.select(station='%04d' % (k+1), channel=chan)[0].data.copy()
                Gm = st_syn.select(station='%04d' % (k+1), channel=chan)[0].data.copy()
                misfit += weights_l2[k*3 + i]**2 * chan_weights[k*3 + i] * \
                          cumtrapz((u - Gm)**2, dx=dt)[-1]

        if weighting_type == 1:
            misfit /= chan_weights[chan_subset].sum()
        elif weighting_type == 2:
            misfit /= weights[stat_subset].sum()

        mf[nn] = misfit
        stfl.append(stf_t)
        stl.append(st_syn)

    am = mf.argmin()

    return constrained_sources[am], stfl[am], mf[am], stl[am]
Example #43
File: core.py Project: msimon00/obspy
def readSEISAN(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a SEISAN file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEISAN file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/2001-01-13-1742-24S.KONO__004")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    4 Trace(s) in Stream:
    .KONO.0.B0Z | 2001-01-13T17:45:01.999000Z - ... | 20.0 Hz, 6000 samples
    .KONO.0.L0Z | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0N | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0E | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    """
    def _readline(fh, length=80):
        data = fh.read(length + 8)
        end = length + 4
        start = 4
        return data[start:end]

    # read data chunk from given file
    fh = open(filename, 'rb')
    data = fh.read(80 * 12)
    # get version info from file
    (byteorder, arch, _version) = _getVersion(data)
    # fetch lines
    fh.seek(0)
    # start with event file header
    # line 1
    data = _readline(fh)
    number_of_channels = int(data[30:33])
    # calculate number of lines with channels
    number_of_lines = number_of_channels // 3 + (number_of_channels % 3 and 1)
    if number_of_lines < 10:
        number_of_lines = 10
    # line 2
    data = _readline(fh)
    # line 3
    for _i in xrange(0, number_of_lines):
        data = _readline(fh)
    # now parse each event file channel header + data
    stream = Stream()
    dlen = arch / 8
    dtype = byteorder + 'i' + str(dlen)
    stype = '=i' + str(dlen)
    for _i in xrange(number_of_channels):
        # get channel header
        temp = _readline(fh, 1040)
        # create Stats
        header = Stats()
        header['network'] = (temp[16] + temp[19]).strip()
        header['station'] = temp[0:5].strip()
        header['location'] = (temp[7] + temp[12]).strip()
        header['channel'] = (temp[5:7] + temp[8]).strip()
        header['sampling_rate'] = float(temp[36:43])
        header['npts'] = int(temp[43:50])
        # create start and end times
        year = int(temp[9:12]) + 1900
        month = int(temp[17:19])
        day = int(temp[20:22])
        hour = int(temp[23:25])
        mins = int(temp[26:28])
        secs = float(temp[29:35])
        header['starttime'] = UTCDateTime(year, month, day, hour, mins) + secs
        if headonly:
            # skip data
            fh.seek(dlen * (header['npts'] + 2), 1)
            stream.append(Trace(header=header))
        else:
            # fetch data
            data = np.fromfile(fh, dtype=dtype, count=header['npts'] + 2)
            # convert to system byte order
            data = np.require(data, stype)
            stream.append(Trace(data=data[2:], header=header))
    return stream
Example #44
def readASC(filename,
            headonly=False,
            skip=0,
            delta=None,
            length=None,
            **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler ASCII file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type skip: int, optional
    :param skip: Number of lines to be skipped from top of file. If defined
        only one trace is read from file.
    :type delta: float, optional
    :param delta: If "skip" is used, "delta" defines sample offset in seconds.
    :type length: int, optional
    :param length: If "skip" is used, "length" defines the number of values to
        be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/QFILE-TEST-ASC.ASC")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    fh = open(filename, 'rt')
    # read file and split text into channels
    channels = []
    headers = {}
    data = StringIO()
    for line in fh.readlines()[skip:]:
        if line.isspace():
            # blank line
            # check if any data fetched yet
            if len(headers) == 0 and data.len == 0:
                continue
            # append current channel
            data.seek(0)
            channels.append((headers, data))
            # create new channel
            headers = {}
            data = StringIO()
            if skip:
                # if skip is set only one trace is read, everything else makes
                # no sense.
                break
            continue
        elif line[0].isalpha():
            # header entry
            key, value = line.split(':', 1)
            key = key.strip()
            value = value.strip()
            headers[key] = value
        elif not headonly:
            # data entry - may be written in multiple columns
            data.write(line.strip() + ' ')
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    # custom header
    custom_header = {}
    if delta:
        custom_header["delta"] = delta
    if length:
        custom_header["npts"] = length

    for headers, data in channels:
        # create Stats
        header = Stats(custom_header)
        header['sh'] = {}
        channel = [' ', ' ', ' ']
        # generate headers
        for key, value in headers.iteritems():
            if key == 'DELTA':
                header['delta'] = float(value)
            elif key == 'LENGTH':
                header['npts'] = int(value)
            elif key == 'CALIB':
                header['calib'] = float(value)
            elif key == 'STATION':
                header['station'] = value
            elif key == 'COMP':
                channel[2] = value[0]
            elif key == 'CHAN1':
                channel[0] = value[0]
            elif key == 'CHAN2':
                channel[1] = value[0]
            elif key == 'START':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = toUTCDateTime(value)
            else:
                # everything else gets stored into sh entry
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            # read data
            data = loadtxt(data, dtype='float32', ndmin=1)

            # cut data if requested
            if skip and length:
                data = data[:length]

            # use correct value in any case
            header["npts"] = len(data)

            stream.append(Trace(data=data, header=header))
    return stream
Example #45
def stream_collapse_tr(st):

    if not isinstance(st, Stream):
        raise InputError("'st' must be a 'obspy.core.stream.Stream' object")

    stream_new = Stream()
    # Generate sorted list of traces (no copy)
    # Sort order, id, starttime, endtime
    ids = []
    for tr in st:
        if not tr.id in ids:
            ids.append(tr.id)
    for id in ids:
        print "new_trace id: %s" % id
        tr_new = Trace()
        tr_new.data = np.zeros(st[0].data.shape)
#        tr_new.stats = {}
        tr_new.stats_tr1 = {}
        tr_new.stats_tr2 = {}
        starttime1_list = []
        starttime2_list = []
        endtime1_list = []
        endtime2_list = []
        n_tr = 0
        for tr in st:
            if tr.id == id:
                print tr.id
                if len(tr_new.data) != len(tr.data):
                    lp = len(tr_new.data) - len(tr.data)
                    print "lp: %d" % lp
                    if lp > 0:
                        left = np.ceil(lp / 2)
                        right = lp - left
                        cdata = np.append(np.zeros(left, dtype=tr.data.dtype),
                                          tr.data)
                        tr.data = np.append(cdata,
                                            np.zeros(right,
                                                     dtype=tr.data.dtype))
                    else:
                        lp = -lp
                        left = np.ceil(lp / 2)
                        right = lp - left
                        tr.data = tr.data[left:-right]
                    print "len tr: %d" % len(tr)
                    print "len tr_new: % d" % len(tr_new)
                tr_new.data += tr.data
                n_tr += 1
                starttime1_list.append(tr.stats_tr1.starttime)
                starttime2_list.append(tr.stats_tr2.starttime)
                endtime1_list.append(tr.stats_tr1.endtime)
                endtime2_list.append(tr.stats_tr2.endtime)

                tr_new.stats.update(tr.stats)
                tr_new.stats_tr1.update(tr.stats_tr1)
                tr_new.stats_tr2.update(tr.stats_tr2)
        tr_new.data /= n_tr
        tr_new.stats['starttime1'] = starttime1_list
        tr_new.stats['starttime2'] = starttime2_list
        tr_new.stats['endtime1'] = endtime1_list
        tr_new.stats['endtime2'] = endtime2_list
        stream_new.append(tr_new)

    return stream_new
Example #46
def usarray_read(fname):
    """ Read the BAM US-Array lbv data format used on Mike-2 test specimen.

    Read the BAM US-Array lbv data format used on Mike-2 test specimen into a
    stream object.
    As there is no obvious station (or any other) information in the data file
    and the parameters are not supposed to change, they are hardcoded here.

    :parameters:
    ------------
    :type fname: string
    :param fname: Path to the file containing the data to be read
        (WITHOUT EXTENSION); the extensions .lbv and .hdr will be added
        automatically
    :rtype: :class:`~obspy.core.Stream` object
    :return: **st**: obspy.core.Stream object
        Obspy stream object containing the data
    """

    # filenames
    lbvfilename = fname + '.lbv'
    hdrfilename = fname + '.hdr'

    # initialise
    st = Stream()
    tr = Trace()
    # tr = SacIO()

    # static parameters
    t = os.path.getmtime(hdrfilename)
    tt = datetime.datetime.fromtimestamp(t)

    tr.stats['starttime'] = UTCDateTime(tt.year, tt.month, tt.day, tt.hour,
                                        tt.minute, tt.second, tt.microsecond)
    tr.stats['network'] = 'BAM-USArray'
    tr.stats['channel'] = 'z'

    # reading header from file
    fh = open(hdrfilename, 'r')
    while True:
        line = fh.readline()
        if line.__len__() < 1:
            break
        line = line.rstrip()
        if line.find('PK') > -1:
            parts = re.split(':', line)
            tr.stats['location'] = parts[1].lstrip()
        if line.find('transceivers') > -1:
            parts = re.split(':', line)
            ntra = int(parts[1].lstrip())
            traco = np.zeros((ntra, 3), float)
            for i in range(ntra):
                coordstr = fh.readline().split()
                for j in range(3):
                    traco[i, j] = float(coordstr[j])
        if line.find('measurements') > -1:
            parts = re.split(':', line)
            nmeas = int(parts[1].lstrip())
            measco = np.zeros((nmeas, 2), int)
            for i in range(nmeas):
                configstr = fh.readline().split()
                for j in range(2):
                    measco[i, j] = float(configstr[j])
        if line.find('samples') > -1:
            parts = re.split(':', line)
            tr.stats['npts'] = int(parts[1].lstrip())
        if line.find('samplefreq') > -1:
            parts = re.split(':', line)
            tr.stats['sampling_rate'] = int(parts[1].lstrip())

    fh.close()

    # reading data from file
    fd = open(lbvfilename, 'rb')
    datatype = '>i2'
    read_data = np.fromfile(file=fd, dtype=datatype)
    fd.close()

    # sort and store traces
    for i in range(nmeas):
        # receiver number stored as station name
        tr.stats['station'] = str(measco[i, 1])
        # receiver coords (storing not yet implemented)
        stla = traco[measco[i, 1] - 1, 0]  # x
        stlo = traco[measco[i, 1] - 1, 1]  # y
        stel = traco[measco[i, 1] - 1, 2]  # z
        # transmitter number stored as event name (storing not yet implemented)
        kevnm = str(measco[i, 0])
        # transmitter coords (storing not yet implemented)
        evla = traco[measco[i, 0] - 1, 0]  # x
        evlo = traco[measco[i, 0] - 1, 1]  # y
        evdp = traco[measco[i, 0] - 1, 2]  # z
        tr.data = read_data[i * tr.stats.npts:(i + 1) * tr.stats.npts]
        # append a copy, otherwise every stream entry would reference the same Trace
        st.extend([tr.copy()])
        # plot 1 trace for test purposes
        # if i==20:
        #    tr.plot()
        #    print ('plot done')

    return st
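
A minimal usage sketch for the reader above (the file stem is hypothetical; it assumes a matching .lbv/.hdr pair on disk and the function defined in this example):

st = usarray_read('/data/mike2/shot001')   # opens shot001.lbv and shot001.hdr
print(st)
print(st[0].stats.station, st[0].stats.npts)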
Example #47
# Updating variables from what we just learnt from reading the data
length = data.shape[0]
# Update the Obspy structure for plots, from the data read
print("Updating trace stats...")
statsx.update({'npts': length})
statsx.update({'sampling_rate': int(sampling_rate / decimation)})
statsx.update({'starttime': starttime})

#______________________________________________________________________________
# Generate the dayplot and write to miniSeed format, for each component
print("Generating trace...")
statsx.update({
    'channel':
    'BH' + str(comp) + "." + filename_date[4:6] + "-" + filename_date[6:8]
})
Xt = Trace(data=data[:], header=statsx)
#Xt.filter('lowpass', freq=50, corners=2, zerophase=True)
del data
stream = Stream(traces=[Xt])
del Xt

# Plot output
print("Generating plot...")
outfile = os.path.join(
    plotdir, filename_date[0:8] + '-dayplotFilter' + str(comp) + '.png')
stream.plot(type='dayplot', outfile=outfile, size=size, events=cat_all)

print("Miniseed writing...")
outminiseed = os.path.join(miniseeddir,
                           filename_date[0:8] + "-comp" + str(comp) + '.mseed')
stream.write(outminiseed, format='MSEED')
Example #48
    def dcomp_fix_azim(self, azim=None):
        """
        Method to decompose radial and transverse receiver function 
        streams into back-azimuth harmonics along direction ``azim``.

        Parameters
        ----------
        azim : float
            Direction (azimuth) along which the B1 component of the stream
            is minimized (between ``xmin`` and ``xmax``)

        Attributes
        ----------
        hstream : :class:`~obspy.core.Stream`
            Stream containing the 5 harmonics, oriented in direction ``azim``

        """

        if azim is None:
            azim = self.azim
        else:
            self.azim = azim

        print(
            'Decomposing receiver functions into baz harmonics for azimuth = ',
            azim)

        # Some integers
        nbin = len(self.radialRF)
        nz = len(self.radialRF[0].data)
        deg2rad = np.pi / 180.

        # Copy stream stats
        str_stats = self.radialRF[0].stats

        # Initialize work arrays
        C0 = np.zeros(nz)
        C1 = np.zeros(nz)
        C2 = np.zeros(nz)
        C3 = np.zeros(nz)
        C4 = np.zeros(nz)

        # Loop over each depth step
        for iz in range(nz):

            # Initialize working arrays
            OBS = np.zeros(2 * nbin)
            H = np.zeros((2 * nbin, 5))

            # Radial component
            for irow, trace in enumerate(self.radialRF):

                baz = trace.stats.baz
                OBS[irow] = trace.data[iz]
                H[irow, 0] = 1.0
                H[irow, 1] = np.cos(deg2rad * (baz - azim))
                H[irow, 2] = np.sin(deg2rad * (baz - azim))
                H[irow, 3] = np.cos(2. * deg2rad * (baz - azim))
                H[irow, 4] = np.sin(2. * deg2rad * (baz - azim))

            shift = 90.

            # Transverse component
            for irow, trace in enumerate(self.transvRF):

                baz = trace.stats.baz
                OBS[irow + nbin] = trace.data[iz]
                H[irow + nbin, 0] = 0.0
                H[irow + nbin, 1] = np.cos(deg2rad * (baz + shift - azim))
                H[irow + nbin, 2] = np.sin(deg2rad * (baz + shift - azim))
                H[irow + nbin,
                  3] = np.cos(2. * deg2rad * (baz + shift / 2.0 - azim))
                H[irow + nbin,
                  4] = np.sin(2. * deg2rad * (baz + shift / 2.0 - azim))

            # Solve system of equations with truncated SVD
            u, s, v = np.linalg.svd(H)
            s[s < 0.001] = 0.
            CC = np.linalg.solve(s[:, None] * v, u.T.dot(OBS)[:5])

            # Fill up arrays
            C0[iz] = np.float(CC[0])
            C1[iz] = np.float(CC[1])
            C2[iz] = np.float(CC[2])
            C3[iz] = np.float(CC[3])
            C4[iz] = np.float(CC[4])

        # Put back into traces
        A = Trace(data=C0, header=str_stats)
        B1 = Trace(data=C1, header=str_stats)
        B2 = Trace(data=C2, header=str_stats)
        C1 = Trace(data=C3, header=str_stats)
        C2 = Trace(data=C4, header=str_stats)

        # Put all traces into stream
        self.hstream = Stream(traces=[A, B1, B2, C1, C2])
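
A hedged usage sketch for the method above (``rfs`` stands for whatever object of this class holds the ``radialRF`` and ``transvRF`` streams; the azimuth value is only illustrative):

rfs.dcomp_fix_azim(azim=30.)
print(rfs.hstream)   # 5 traces: A, B1, B2, C1 and C2 along azimuth 30 degrees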
Example #49
    def forward(self, baz_list=None):
        """
        Method to forward calculate radial and transverse component
        receiver functions given the 5 pre-determined harmonics and 
        a list of back-azimuth values. The receiver function signal 
        parameters (length, sampling rate, etc.) will be identical 
        to those in the stream of harmonic components.

        Parameters
        ----------
        baz_list : list
            List of back-azimuth directions over which to calculate
            the receiver functions. If no list is specified, the method
            will use the same back-azimuths as those in the original
            receiver function streams

        Attributes
        ----------
        radial_forward : :class:`~obspy.core.Stream`
            Stream containing the radial receiver functions
        transv_forward : :class:`~obspy.core.Stream`
            Stream containing the transverse receiver functions


        """

        if not hasattr(self, 'hstream'):
            raise (Exception("Decomposition has not been performed yet"))

        if not baz_list:
            print("Warning: no BAZ specified - using all baz from " +
                  "stored streams")
            baz_list = [tr.stats.baz for tr in self.radialRF]
        if not isinstance(baz_list, list):
            baz_list = [baz_list]

        # Some constants
        nz = len(self.hstream[0].data)
        deg2rad = np.pi / 180.

        # Copy traces
        self.radial_forward = Stream()
        self.transv_forward = Stream()

        for baz in baz_list:
            # pre-allocate the data arrays so individual samples can be set below
            trR = Trace(data=np.zeros(nz), header=self.hstream[0].stats)
            trT = Trace(data=np.zeros(nz), header=self.hstream[0].stats)

            # Loop over each time/depth step
            for iz in range(nz):

                # Initialize working arrays
                X = np.zeros(5)
                H = np.zeros((2, 5))

                # Fill up X array
                X[0] = self.hstream[0].data[iz]
                X[1] = self.hstream[1].data[iz]
                X[2] = self.hstream[2].data[iz]
                X[3] = self.hstream[3].data[iz]
                X[4] = self.hstream[4].data[iz]

                # Fill up H arrays (for V and H)
                H[0, 0] = 1.0
                H[0, 1] = np.cos(deg2rad * (baz - self.azim))
                H[0, 2] = np.sin(deg2rad * (baz - self.azim))
                H[0, 3] = np.cos(2. * deg2rad * (baz - self.azim))
                H[0, 4] = np.sin(2. * deg2rad * (baz - self.azim))

                shift = 90.

                H[1, 0] = 0.0
                H[1, 1] = np.cos(deg2rad * (baz + shift - self.azim))
                H[1, 2] = np.sin(deg2rad * (baz + shift - self.azim))
                H[1,
                  3] = np.cos(2. * deg2rad * (baz + shift / 2.0 - self.azim))
                H[1,
                  4] = np.sin(2. * deg2rad * (baz + shift / 2.0 - self.azim))

                # Calculate dot product B = H*X
                B = np.dot(H, X)

                # Extract receiver functions
                trR.data[iz] = B[0]
                trT.data[iz] = -B[1]

            self.radial_forward.append(trR)
            self.transv_forward.append(trT)
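
A short usage sketch following the decomposition above (``rfs`` is the same hypothetical object; the back-azimuth values are illustrative):

rfs.dcomp_fix_azim(azim=30.)
rfs.forward(baz_list=[0., 90., 180., 270.])
print(len(rfs.radial_forward), len(rfs.transv_forward))   # one trace per back-azimuth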
Example #50
def xarray_to_obspy(xdataset: xr.Dataset):
    df = xdataset.attrs['df']
    traces = []
    starttime = list(xdataset.coords['time'].values)[0]
    starttime = _extract_timestamp(starttime)
    for name in xdataset.data_vars:
        xarray = xdataset[name]

        srcs = xarray.coords['src'].values
        recs = xarray.coords['rec'].values
        src_chans = xarray.coords['src_chan'].values
        rec_chans = xarray.coords['rec_chan'].values
        unique_stations = set(list(srcs) + list(recs))
        unique_channels = set(list(src_chans) + list(rec_chans))
        unique_pairs = itertools.combinations(unique_stations, 2)
        arg_list = itertools.product(unique_pairs, unique_channels,
                                     unique_channels)
        for parameter in arg_list:
            src = parameter[0][0]
            rec = parameter[0][1]
            src_chan = parameter[1]
            rec_chan = parameter[2]
            arg_combos = [
                dict(src=src, rec=rec, src_chan=src_chan, rec_chan=rec_chan),
                dict(src=src, rec=rec, src_chan=rec_chan, rec_chan=src_chan),
                dict(src=rec, rec=src, src_chan=src_chan, rec_chan=rec_chan),
                dict(src=rec, rec=src, src_chan=rec_chan, rec_chan=src_chan)
            ]

            arg_dict_to_use = None
            for subdict in arg_combos:
                meta_record = df.loc[(df['src'] == subdict['src'])
                                     & (df['rec'] == subdict['rec']) &
                                     (df['src channel'] == subdict['src_chan'])
                                     & (df['rec channel']
                                        == subdict['rec_chan'])]
                arg_dict_to_use = subdict
                if not meta_record.empty:
                    break
            record = xarray.loc[arg_dict_to_use]

            if not meta_record.empty:
                station_1, network_1 = _extract_station_network_info(src)
                station_2, network_2 = _extract_station_network_info(rec)
                header_dict = {
                    'delta': meta_record['delta'].values[0],
                    'npts': record.data.shape[-1],
                    'starttime': starttime,
                    'station': '{}.{}'.format(station_1, station_2),
                    'channel': '{}.{}'.format(src_chan, rec_chan),
                    'network': '{}.{}'.format(network_1, network_2)
                }
                trace = Trace(data=record.data, header=header_dict)
                if 'rec_latitude' in meta_record.columns:
                    trace.stats.coordinates = {
                        'src_latitude': meta_record['src_latitude'].values[0],
                        'src_longitude':
                        meta_record['src_longitude'].values[0],
                        'rec_latitude': meta_record['rec_latitude'].values[0],
                        'rec_longitude': meta_record['rec_longitude'].values[0]
                    }
                traces.append(trace)

    return Stream(traces=traces)
Example #51
    stats.network = 'NT'
    stats.location = 'R0'
    stats.data_interval = '256Hz'
    stats.delta = .00390625
    stats.data_type = 'variation'

    # Create list of arrays and channel names and initialize counter k
    arrays = [Hx, Hy, Ex, Ey]
    k = 0

    # Loop over channels to create an obspy stream of the data
    for ar in arrays:
        stats.npts = len(ar)
        stats.channel = channels[k]
        ar = np.asarray(ar)
        trace = Trace(ar, stats)
        stream += trace
        trace = None
        k += 1

    # Create a copy of the stream and resample the copied stream to
    # 10 Hz using the default options of the obspy function resample
    finStream = stream.copy()
    finStream.resample(10.0)

    # Create numpy arrays of the resampled data
    Hx_fin = finStream.select(channel='Hx')[0].data
    Hy_fin = finStream.select(channel='Hy')[0].data
    Ex_fin = finStream.select(channel='Ex')[0].data
    Ey_fin = finStream.select(channel='Ey')[0].data
Example #52
def add_corr(params,
             station1,
             station2,
             filterid,
             date,
             time,
             duration,
             components,
             CF,
             sampling_rate,
             name='corr',
             day=False,
             ncorr=0):
    """
    Adds a CCF to the data archive on disk.
    
    :type params: dict
    :param params: This dictionary contains all parameters for the correlation. See params.py to initialize it.
    :type station1: str
    :param station1: The name of station 1 (formatted NET.STA)
    :type station2: str
    :param station2: The name of station 2 (formatted NET.STA)
    :type filterid: int
    :param filterid: The ID (ref) of the filter
    :type date: datetime.date or str
    :param date: The date of the CCF
    :type time: datetime.time or str
    :param time: The time of the CCF
    :type duration: float
    :param duration: The total duration of the exported CCF
    :type components: str
    :param components: The name of the components used (ZZ, ZR, ...)
    :type sampling_rate: float
    :param sampling_rate: The sampling rate of the exported CCF
    :type day: bool
    :param day: Whether this function is called to export a daily stack (True)
        or each CCF (when keep_all parameter is set to True in the
        configuration). Defaults to False.
    :type ncorr: int
    :param ncorr: Number of CCF that have been stacked for this CCF.
    """

    output_folder = params['output_folder']
    export_format = params['export_format']
    sac, mseed = False, False
    if export_format in ["BOTH", "both"]:
        mseed = True
        sac = True
    elif export_format in ["SAC", "sac"]:
        sac = True
    elif export_format in ["MSEED", "mseed"]:
        mseed = True
    if params['crosscorr']: pass

    if day:
        path = os.path.join("STACKS", '%s' % name, "%02i" % filterid,
                            "001_DAYS", components,
                            "%s_%s" % (station1, station2), str(date))
        pair = "%s:%s" % (station1, station2)
        if mseed:
            export_mseed(params, path, pair, components, filterid, CF / ncorr,
                         ncorr)
        if sac:
            export_sac(params, path, pair, components, filterid, CF / ncorr,
                       ncorr)

    else:
        file = '%s.cc' % time
        path = os.path.join(output_folder, "%02i" % filterid, station1,
                            station2, components, date)
        if not os.path.isdir(path):
            os.makedirs(path)

        t = Trace()
        t.data = CF
        t.stats.sampling_rate = sampling_rate
        t.stats.starttime = -float(params['maxlag'])
        t.stats.components = components
        # if ncorr != 0:
        # t.stats.location = "%02i"%ncorr
        st = Stream(traces=[
            t,
        ])
        st.write(os.path.join(path, file), format='mseed')
        del t, st
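
A minimal calling sketch (all names and values are hypothetical; ``params`` only lists the keys this function actually reads, and ``CF`` is simply a NumPy array holding the cross-correlation function):

import numpy as np

params = {'output_folder': 'CROSS_CORRELATIONS',
          'export_format': 'MSEED',
          'maxlag': 120.0,
          'crosscorr': True}
cf = np.zeros(4801, dtype=np.float32)   # placeholder CCF
add_corr(params, 'BE.UCC', 'BE.MEM', filterid=1,
         date='2013-05-01', time='00:00:00', duration=86400.,
         components='ZZ', CF=cf, sampling_rate=20.0)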
Example #53
def read_SES3D(file_or_file_object, *args, **kwargs):
    """
    Turns a SES3D file into an obspy.core.Stream object.

    SES3D files do not contain a starttime and thus the first sample will
    always begin at 1970-01-01T00:00:00.

    The data will be a floating point array of the ground velocity in meters
    per second.

    Furthermore every trace will have a trace.stats.ses3d dictionary which
    contains the following six keys:
        * receiver_latitude
        * receiver_longitude
        * receiver_depth_in_m
        * source_latitude
        * source_longitude
        * source_depth_in_m

    The network, station, and location attributes of the trace will be empty,
    and the channel will be set to either 'X' (south component), 'Y' (east
    component), or 'Z' (vertical component).
    """
    # Make sure that it is a file like object.
    if not hasattr(file_or_file_object, "read"):
        with open(file_or_file_object, "rb") as open_file:
            file_or_file_object = StringIO(open_file.read())

    # Read the header.
    component = file_or_file_object.readline().split()[0].lower()
    npts = int(file_or_file_object.readline().split()[-1])
    delta = float(file_or_file_object.readline().split()[-1])
    # Skip receiver location line.
    file_or_file_object.readline()
    rec_loc = file_or_file_object.readline().split()
    rec_x, rec_y, rec_z = map(float, [rec_loc[1], rec_loc[3], rec_loc[5]])
    # Skip the source location line.
    file_or_file_object.readline()
    src_loc = file_or_file_object.readline().split()
    src_x, src_y, src_z = map(float, [src_loc[1], src_loc[3], src_loc[5]])

    # Read the data.
    data = np.array(map(float, file_or_file_object.readlines()),
                    dtype="float32")

    # Setup Obspy Stream/Trace structure.
    tr = Trace(data=data)
    tr.stats.delta = delta
    # Map the channel attributes.
    tr.stats.channel = {"theta": "X", "phi": "Y", "r": "Z"}[component]
    tr.stats.ses3d = AttribDict()
    tr.stats.ses3d.receiver_latitude = rotations.colat2lat(rec_x)
    tr.stats.ses3d.receiver_longitude = rec_y
    tr.stats.ses3d.receiver_depth_in_m = rec_z
    tr.stats.ses3d.source_latitude = rotations.colat2lat(src_x)
    tr.stats.ses3d.source_longitude = src_y
    tr.stats.ses3d.source_depth_in_m = src_z
    # Small check.
    if npts != tr.stats.npts:
        msg = "The sample count specified in the header does not match " + \
            "the actual data count."
        warnings.warn(msg)
    return Stream(traces=[tr])
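
A hedged usage sketch for the reader above (the file path is hypothetical):

st = read_SES3D('/path/to/ses3d_output/receiver_001.x')
tr = st[0]
print(tr.stats.channel, tr.stats.delta, tr.stats.ses3d.receiver_latitude)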
Example #54
def kutec_read(fname):
    """ Read the K-UTec proprietary file format.

    Read data in the K-UTec specific IMC FAMOS format into a stream object.
    As there is no obvious station information in the data file,
    Network is set to KU and Station is set to the first five letters of the
    filename.

    :parameters:
    ------------
    fname : string
        path to the file containing the data to be read

    .. rubric:: Returns

    st : obspy.core.Stream object
        Obspy stream object containing the data

    """
    tr = Trace()

    line = []
    keys = {}
    f = open(fname, 'r')
    char = f.read(1)  # read leading '|'
    while char == '|':
        key = []
        cnt = 0
        while 1:
            key.append(f.read(1))
            if key[-1] == ',':
                cnt += 1
            if cnt == 3:
                break
        tkeys = string.split(string.join(key, ''), ',')
        key.append(f.read(int(tkeys[2])))
        keyline = string.join(key, '')
        f.read(1)  # read terminating ';'
        char = f.read(1)  # read leading '|'
        # print char
        while (char == '\r') or (char == '\n'):
            char = f.read(1)  # read leading '|'
        #    print char
        keyval = keyline.split(',')
        # ######
        # # in the post 20120619 version files there are leading
        # linefeed in the key (\n), remove them here
        if keyval[0].startswith('\n|'):
            print "does this happen", keyval
            keyval[0] = keyval[0][2:]

        if keyval[0] == 'CF':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Dateiformat'] = int(keyval[1])
            keys[keyval[0]]['Keylaenge'] = int(keyval[2])
            keys[keyval[0]]['Prozessor'] = int(keyval[3])
        elif keyval[0] == 'CK':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Dump'] = keyval[3]
            keys[keyval[0]]['Abgeschlossen'] = int(keyval[3])
            if keys[keyval[0]]['Abgeschlossen'] != 1:
                print "%s %s = %s not implemented." % (keyval[0], \
                        'Abgeschlossen', keys[keyval[0]]['Abgeschlossen'])
        elif keyval[0] == 'NO':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Ursprung'] = int(keyval[3])
            keys[keyval[0]]['NameLang'] = int(keyval[4])
            keys[keyval[0]]['Name'] = keyval[5]
            keys[keyval[0]]['KommLang'] = int(keyval[6])
            if keys[keyval[0]]['KommLang']:
                keys[keyval[0]]['Kommentar'] = keyval[7]
        elif keyval[0] == 'CP':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['BufferReferenz'] = int(keyval[3])
            keys[keyval[0]]['Bytes'] = int(keyval[4])  # bytes per sample value
            keys[keyval[0]]['ZahlenFormat'] = int(keyval[5])
            keys[keyval[0]]['SignBits'] = int(keyval[6])
            keys[keyval[0]]['Maske'] = int(keyval[7])
            keys[keyval[0]]['Offset'] = int(keyval[8])
            keys[keyval[0]]['DirekteFolgeAnzahl'] = int(keyval[9])
            keys[keyval[0]]['AbstandBytes'] = int(keyval[10])
            if keys[keyval[0]]['DirekteFolgeAnzahl'] != 1:
                print "%s %s = %s not implemented." % (keyval[0], \
                   'DirekteFolgeAnzahl', keys[keyval[0]]['DirekteFolgeAnzahl'])
                break

        elif keyval[0] == 'Cb':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['AnzahlBufferInKey'] = int(keyval[3])
            if keys[keyval[0]]['AnzahlBufferInKey'] != 1:
                print "%s %s = %d not implemented." % (keyval[0], \
                    'AnzahlBufferInKey', keys[keyval[0]]['AnzahlBufferInKey'])
                break
            keys[keyval[0]]['BytesInUserInfo'] = int(keyval[4])
            keys[keyval[0]]['BufferReferenz'] = int(keyval[5])
            keys[keyval[0]]['IndexSampleKey'] = int(keyval[6])
            keys[keyval[0]]['OffsetBufferInSampleKey'] = int(keyval[7])
            if keys[keyval[0]]['OffsetBufferInSampleKey'] != 0:
                print "%s %s = %d not implemented." % (keyval[0], \
                                    'OffsetBufferInSampleKey', \
                                    keys[keyval[0]]['OffsetBufferInSampleKey'])
                break
            keys[keyval[0]]['BufferLangBytes'] = int(keyval[8])
            keys[keyval[0]]['OffsetFirstSampleInBuffer'] = int(keyval[9])
            if keys[keyval[0]]['OffsetFirstSampleInBuffer'] != 0:
                print "%s %s = %d not implemented." % (keyval[0], \
                                'OffsetFirstSampleInBuffer', \
                                keys[keyval[0]]['OffsetFirstSampleInBuffer'])
                break
            keys[keyval[0]]['BufferFilledBytes'] = int(keyval[10])
            keys[keyval[0]]['x0'] = float(keyval[12])
            keys[keyval[0]]['Addzeit'] = float(keyval[13])
            if keys[keyval[0]]['BytesInUserInfo']:
                keys[keyval[0]]['UserInfo'] = int(keyval[14])
        elif keyval[0] == 'CS':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['AnzahlBufferInKey'] = int(keyval[3])
            tmp = string.join(keyval[4:], ',')
            keys[keyval[0]]['Rohdaten'] = tmp

            npts = keys['Cb']['BufferFilledBytes'] / keys['CP']['Bytes']
            tr.stats['npts'] = npts
            # allocate array
            tr.data = np.ndarray(npts, dtype=float)
            # treat different number formats
            if keys['CP']['ZahlenFormat'] == 4:
                tmp = np.fromstring(keys['CS']['Rohdaten'], dtype='uint8', \
                                count=npts * 2)
                tr.data = (tmp[0::2].astype(float) + \
                       (tmp[1::2].astype(float) * 256))
                tr.data[np.nonzero(tr.data > 32767)] -= 65536
            elif keys['CP']['ZahlenFormat'] == 8:
                tr.data = np.fromstring(keys['CS']['Rohdaten'],
                                        dtype='float64',
                                        count=npts)
            else:
                print "%s %s = %d not implemented." % (keyval[0], \
                             'ZahlenFormat', keys[keyval[0]]['ZahlenFormat'])
                break

        elif keyval[0] == 'NT':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Tag'] = int(keyval[3])
            keys[keyval[0]]['Monat'] = int(keyval[4])
            keys[keyval[0]]['Jahr'] = int(keyval[5])
            keys[keyval[0]]['Stunden'] = int(keyval[6])
            keys[keyval[0]]['Minuten'] = int(keyval[7])
            keys[keyval[0]]['Sekunden'] = float(keyval[8])
            tr.stats['starttime'] = UTCDateTime(keys[keyval[0]]['Jahr'], \
                                                keys[keyval[0]]['Monat'], \
                                                keys[keyval[0]]['Tag'], \
                                                keys[keyval[0]]['Stunden'], \
                                                keys[keyval[0]]['Minuten'], \
                                                keys[keyval[0]]['Sekunden'])
        elif keyval[0] == 'CD':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['dx'] = float(keyval[3])
            tr.stats['delta'] = keys[keyval[0]]['dx']
            keys[keyval[0]]['kalibiert'] = int(keyval[4])
            if keys[keyval[0]]['kalibiert'] != 1:
                print "%s %s = %d not implemented." % \
                    (keyval[0], 'kalibiert',
                     keys[keyval[0]]['kalibiert'])
                break
            keys[keyval[0]]['EinheitLang'] = int(keyval[5])
            keys[keyval[0]]['Einheit'] = keyval[6]

            if keys[keyval[0]]['Version'] == 2:
                keys[keyval[0]]['Reduktion'] = int(keyval[7])
                keys[keyval[0]]['InMultiEvents'] = int(keyval[8])
                keys[keyval[0]]['SortiereBuffer'] = int(keyval[9])
                keys[keyval[0]]['x0'] = float(keyval[10])
                keys[keyval[0]]['PretriggerVerwendung'] = int(keyval[11])
            if keys[keyval[0]]['Version'] == 1:
                keys[keyval[0]]['Reduktion'] = ''
                keys[keyval[0]]['InMultiEvents'] = ''
                keys[keyval[0]]['SortiereBuffer'] = ''
                keys[keyval[0]]['x0'] = ''
                keys[keyval[0]]['PretriggerVerwendung'] = 0

        elif keyval[0] == 'CR':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Transformieren'] = int(keyval[3])
            keys[keyval[0]]['Faktor'] = float(keyval[4])
            keys[keyval[0]]['Offset'] = float(keyval[5])
            keys[keyval[0]]['Kalibriert'] = int(keyval[6])
            keys[keyval[0]]['EinheitLang'] = int(keyval[7])
            keys[keyval[0]]['Einheit'] = keyval[8]
        elif keyval[0] == 'CN':  # station names
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['IndexGruppe'] = int(keyval[3])
            keys[keyval[0]]['IndexBit'] = int(keyval[5])
            keys[keyval[0]]['NameLang'] = int(keyval[6])
            keys[keyval[0]]['Name'] = keyval[7]
            keys[keyval[0]]['KommLang'] = int(keyval[8])
            keys[keyval[0]]['Kommentar'] = keyval[9]
        else:
            keys[keyval[0]] = {}
            keys[keyval[0]]['KeyString'] = keyval[1:]

    # NT key is beginning of measurement (starting of measurement unit)
    # keys['Cb']['Addzeit'] needs to be added to obtain the absolute trigger
    # time

    tr.stats['starttime'] += keys['Cb']['Addzeit']

    # Adjust starttime according to pretrigger (There is some uncertainty
    # about the CD key) to get relative trigger time
    # for CD:Version == 1 always use Cb:x0
    # for CD:Version == 2 only use Cb:x0 if CD:PretriggerVerwendung == 1
    if keys['CD']['Version'] == 1 or \
        (keys['CD']['Version'] == 2 and
         keys['CD']['PretriggerVerwendung'] == 1):
        tr.stats['starttime'] += keys['Cb']['x0']

    if 'CR' in keys:
        if keys['CR']['Transformieren']:
            tr.data = tr.data * keys['CR']['Faktor'] + keys['CR']['Offset']

    f.close()
    # ### Channel naming
    tr.stats['network'] = 'KU'
    tr.stats['location'] = ''
    # ### Pre-20120619 naming convention to extract the station name from the
    # filename
    # tr.stats['station'] = fname[-12:-7]
    # ### Now take the station name from the CN key
    tr.stats['station'] = keys['CN']['Name'].replace('_', '')
    # ### or construct a name that is consistent with the old filename
    # generated one from the key
    # ### This is very likely to cause a problem sooner or later.
    # tr.stats['station'] = 'MK%03d' % int(keys['CN']['Name'].split('_')[-1])

    # tr.stats['station'] = keys['CN']['Name'].replace('_','')

    st = Stream()
    st.extend([tr])

    return st
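
A minimal usage sketch (the file name is hypothetical; the file has to follow the IMC FAMOS layout parsed above):

st = kutec_read('MK001_20120701.famos')
print(st[0].stats.station, st[0].stats.starttime, st[0].stats.npts)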
Example #55
# Processing each channel to prevent a memory error regarding matplotlib
for x in ['X', 'Y', 'Z']:
    statsx = {
        'network': 'TW',
        'station': 'RASPI',
        'location': '00',
        'channel': 'BH' + x,
        'npts': length,
        'sampling_rate': sampling_rate,
        'mseed': {
            'dataquality': 'D'
        },
        'starttime': starttime
    }
    Xt = Trace(data=data[x], header=statsx)
    Xt_filt = Xt.copy()
    Xt_filt.filter('lowpass', freq=20.0, corners=2, zerophase=True)
    stream = Stream(traces=[Xt_filt])
    stream.plot(type='dayplot',
                outfile='dayplotFilter' + x + '.png',
                size=size,
                events=events)
    stream = Stream(traces=[Xt])
    stream.plot(type='dayplot',
                outfile='dayplot' + x + '.png',
                size=size,
                events=events)

#Remove all the download and generated files
os.system('rm -rf /root/earthquaketemp')
Example #56
def main():
    db = connect()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute SARA_RATIO ***')

    while is_next_job(db, jobtype='SARA_RATIO'):
        t0 = time.time()
        jobs = get_next_job(db, jobtype='SARA_RATIO')
        stations = []
        pairs = []
        refs = []

        for job in jobs:
            refs.append(job.ref)
            pairs.append(job.pair)
            netsta1, netsta2 = job.pair.split(':')
            stations.append(netsta1)
            stations.append(netsta2)
            goal_day = job.day

        stations = np.unique(stations)

        logging.info("New SARA Job: %s (%i pairs with %i stations)" %
                     (goal_day, len(pairs), len(stations)))

        logging.debug(
            "Preloading all envelopes and applying site and sensitivity")
        all = {}
        for station in stations:
            tmp = get_sara_param(db, station)
            sensitivity = tmp.sensitivity
            site_effect = tmp.site_effect
            try:
                tmp = read(
                    os.path.join("SARA", "ENV", station,
                                 "%s.MSEED" % goal_day))
            except:
                logging.debug("Error reading %s:%s" % (station, goal_day))
                continue
            for trace in tmp:
                trace.data /= (sensitivity * site_effect)
            all[station] = tmp

        logging.debug("Computing all pairs")
        for job in jobs:
            netsta1, netsta2 = job.pair.split(':')
            net1, sta1 = netsta1.split(".")
            net2, sta2 = netsta2.split(".")
            trace = Trace()
            if netsta1 not in all or netsta2 not in all:
                update_job(db,
                           job.day,
                           job.pair,
                           'SARA_RATIO',
                           'D',
                           ref=job.ref)
                continue
            tmp = Stream()
            for tr in all[netsta1]:
                tmp += tr
            for tr in all[netsta2]:
                tmp += tr
            # tmp = Stream(traces=[all[netsta1], all[netsta2]])
            # print(tmp)
            tmp.merge()
            tmp = make_same_length(tmp)
            tmp.merge(fill_value=np.nan)
            if len(tmp) > 1:
                trace.data = tmp.select(network=net1, station=sta1)[0].data / \
                             tmp.select(network=net2, station=sta2)[0].data
                trace.stats.starttime = tmp[0].stats.starttime
                trace.stats.delta = tmp[0].stats.delta

                env_output_dir = os.path.join('SARA', 'RATIO',
                                              job.pair.replace(":", "_"))
                if not os.path.isdir(env_output_dir):
                    os.makedirs(env_output_dir)
                trace.write(os.path.join(env_output_dir, goal_day + '.MSEED'),
                            format="MSEED",
                            encoding="FLOAT32")

            update_job(db, job.day, job.pair, 'SARA_RATIO', 'D', ref=job.ref)
            del tmp
        logging.info("Done. It took %.2f seconds" % (time.time() - t0))
Example #57
def save_data():
	while True:
		if queue.qsize()>=block_length:

			#two arrays for reading samples & jitter into
			data=numpy.zeros([block_length],dtype=numpy.int16)
			#note jitter uses float32 - decimals
			jitter=numpy.zeros([block_length],dtype=numpy.float32)

			firsttime=True
			totaltime=0
			sample_time = 0
			sample_difference = 0
			
			#this is the loop without storing jitter value and calcs
			packet = queue.get()
			data[0] = packet[0]
			starttime = packet[1]
			
			previous_sample=packet[1]
			queue.task_done()

			for x in range (1,block_length):
				packet = queue.get()
				data[x] = packet[0]
				
				sample_time=packet[1]
				sample_difference=sample_time- previous_sample

                # as sps is a rate, and s.d. is a time, it's 1 over sps
				jitter[x] = sample_difference - (1/sps)
				
                # previous_sample is used to get the difference in the next loop
				previous_sample=packet[1]

				totaltime=totaltime+sample_difference
				queue.task_done()

	
            # a.s.r. is a rate, and t.t is a time, so it's 1 over
			avg_samplingrate = 1 / (totaltime/block_length)
			stats = {'network': 'UK', 'station': 'RASPI', 'location': '00',
					'channel': 'BHZ', 'npts': block_length, 'sampling_rate': avg_samplingrate, 
					'mseed': {'dataquality': 'D'},'starttime': starttime}
			
			sample_stream =Stream([Trace(data=data, header=stats)])
			jitter_stream =Stream([Trace(data=jitter)])

			#write sample data
			File = mseed_directory + str(sample_stream[0].stats.starttime.date) + '.mseed'
			temp_file = mseed_directory + ".temp.tmp"
			
			if os.path.isfile(File):
				#writes temp file, then merges it with the whole file, then removes file after
				sample_stream.write(temp_file,format='MSEED',encoding='INT16',reclen=512)
				subprocess.call("cat "+temp_file+" >> "+File,shell=True)
				subprocess.call(["rm",temp_file])
			else:
			#if this is the first block of day
				sample_stream.write(File,format='MSEED',encoding='INT16',reclen=512)

			
			#write jitter data
			File = jitter_directory + str(jitter_stream[0].stats.starttime.date) + '.mseed'
			temp_file = jitter_directory + ".temp.tmp"
			
			if os.path.isfile(File):
				#writes temp file, then merges it with the whole file, then removes file after
				jitter_stream.write(temp_file,format='MSEED',encoding='FLOAT32',reclen=512)
				subprocess.call("cat "+temp_file+" >> "+File,shell=True)
				subprocess.call(["rm",temp_file])
			else:
			#if this is the first block of day
				jitter_stream.write(File,format='MSEED',encoding='FLOAT32',reclen=512)
Example #58
traces = []

chans = ['Z', 'N', 'E']

for file in glob.iglob(path + 'yspec.out.*'):
    stationID = int(file.split('.')[-1])
    
    dat = np.loadtxt(file)
    npts = len(dat[:,0])

    for i, chan in enumerate(chans):
        stats = {'network': 'SG', 
                 'station': 'RS%02d' % stationID, 
                 'location': '',
                 'channel': chan, 
                 'npts': npts, 
                 'sampling_rate': (npts - 1.)/(dat[-1,0] - dat[0,0]),
                 'starttime': t,
                 'mseed' : {'dataquality': 'D'}}
        traces.append(Trace(data=dat[:,1+i], header=stats))
 
 
st = Stream(traces)
st.sort()

fname =  path + 'seismograms.mseed'

print fname
st.write(fname, format='MSEED')
Example #59
def ncExtract(address):
    """
    This function extracts a station (data, response file, header) from a
    netCDF file.

    : type rootgrp: netCDF4.Dataset
    : param rootgrp: a netCDF version 4 group that contains one event
    : type tr: class 'obspy.core.trace.Trace'
    : param tr: the trace that will be extracted from the nc file
    : type resp_read: str
    : param resp_read: the whole response file of the trace in 
                       one string format extracted from the info/respfile attribute
    """

    global rootgrp

    if not os.path.isdir(os.path.join(address, 'Resp_NC')):
        os.mkdir(os.path.join(address, 'Resp_NC'))

    if not os.path.isdir(os.path.join(address, 'BH_NC')):
        os.mkdir(os.path.join(address, 'BH_NC'))

    root_grps = rootgrp.groups

    num_iter = 1
    print "\n----------------------------"
    print "Number of all available"
    print "stations in the netCDF file:"
    print len(root_grps)
    print "----------------------------\n"

    if not input['noaxisem'] == 'Y':
        axi_open = open(os.path.join(address, 'STATIONS'), 'w')
        axi_open.writelines(rootgrp.axisem[17:])
        axi_open.close()

    for grp in root_grps:

        print str(num_iter),

        stgrp = root_grps[grp]
        stdata = stgrp.variables['data'][:]

        resp_read = stgrp.respfile

        if not resp_read == 'NO RESPONSE FILE AVAILABLE':
            resp_open = open(
                os.path.join(address, 'Resp_NC', 'RESP.' + stgrp.identity),
                'w')
            resp_open.writelines(resp_read)
            resp_open.close()
        else:
            print '\nNO RESPONSE FILE AVAILABLE for ' + stgrp.identity

        ststats = {}

        for key in range(0, len(eval(stgrp.headerK))):
            ststats[eval(stgrp.headerK)[key]] = eval(stgrp.headerV)[key]

        tr = Trace(stdata, ststats)

        tr.write(os.path.join(address, 'BH_NC', stgrp.identity), format='SAC')

        num_iter += 1
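
The header reconstruction above calls eval on the headerK/headerV attributes once per index; since the two attributes hold parallel lists, the same dictionary can be built in a single pass with zip. A minimal sketch, assuming stgrp and stdata carry the same content as in the example:

from obspy.core import Trace

# headerK / headerV are string representations of two parallel lists
keys = eval(stgrp.headerK)
values = eval(stgrp.headerV)
ststats = dict(zip(keys, values))
tr = Trace(data=stdata, header=ststats)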
Example #60
0
def readQ(filename,
          headonly=False,
          data_directory=None,
          byteorder='=',
          **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler Q file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: Q header file to be read. Must have a `QHD` file
        extension.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type data_directory: str, optional
    :param data_directory: Data directory where the corresponding QBN file can
        be found.
    :type byteorder: ``'<'``, ``'>'``, or ``'='``, optional
    :param byteorder: Enforce byte order for data file. This is important for
        Q files written in older versions of Seismic Handler, which don't
        explicitly state the `BYTEORDER` flag within the header file. Defaults
        to ``'='`` (local byte order).
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    Q files consist of two files per data set:

     * an ASCII header file with file extension `QHD` and the
     * binary data file with file extension `QBN`.

    The read method only accepts header files for the ``filename`` parameter.
    ObsPy assumes that the corresponding data file is within the same directory
    if the ``data_directory`` parameter is not set. Otherwise it will search
    in the given ``data_directory`` for a file with the `QBN` file extension.
    This function should NOT be called directly, it registers via the
    ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/QFILE-TEST.QHD")
    >>> st    #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    if not headonly:
        if not data_directory:
            data_file = os.path.splitext(filename)[0] + '.QBN'
        else:
            data_file = os.path.basename(os.path.splitext(filename)[0])
            data_file = os.path.join(data_directory, data_file + '.QBN')
        if not os.path.isfile(data_file):
            msg = "Can't find corresponding QBN file at %s."
            raise IOError(msg % data_file)
        fh_data = open(data_file, 'rb')
    # loop through read header file
    fh = open(filename, 'rt')
    line = fh.readline()
    cmtlines = int(line[5:7]) - 1
    # comment lines
    comments = []
    for _i in xrange(0, cmtlines):
        comments += [fh.readline()]
    # trace lines
    traces = {}
    i = -1
    id = ''
    for line in fh:
        cid = int(line[0:2])
        if cid != id:
            id = cid
            i += 1
        traces.setdefault(i, '')
        traces[i] += line[3:].strip()
    # create stream object
    stream = Stream()
    for id in sorted(traces.keys()):
        # fetch headers
        header = {}
        header['sh'] = {
            "FROMQ": True,
            "FILE": os.path.splitext(os.path.split(filename)[1])[0],
        }
        channel = ['', '', '']
        npts = 0
        for item in traces[id].split('~'):
            key = item.strip()[0:4]
            value = item.strip()[5:].strip()
            if key == 'L001':
                npts = header['npts'] = int(value)
            elif key == 'L000':
                continue
            elif key == 'R000':
                header['delta'] = float(value)
            elif key == 'R026':
                header['calib'] = float(value)
            elif key == 'S001':
                header['station'] = value
            elif key == 'C000' and value:
                channel[2] = value[0]
            elif key == 'C001' and value:
                channel[0] = value[0]
            elif key == 'C002' and value:
                channel[1] = value[0]
            elif key == 'C003':
                if value == '<' or value == '>':
                    byteorder = header['sh']['BYTEORDER'] = value
            elif key == 'S021':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = toUTCDateTime(value)
            elif key == 'S022':
                header['sh']['P-ONSET'] = toUTCDateTime(value)
            elif key == 'S023':
                header['sh']['S-ONSET'] = toUTCDateTime(value)
            elif key == 'S024':
                header['sh']['ORIGIN'] = toUTCDateTime(value)
            elif key:
                key = INVERTED_SH_IDX.get(key, key)
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        # remember record number
        header['sh']['RECNO'] = len(stream) + 1
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            if not npts:
                stream.append(Trace(header=header))
                continue
            # read data
            data = fh_data.read(npts * 4)
            dtype = byteorder + 'f4'
            data = np.fromstring(data, dtype=dtype)
            # convert to system byte order
            data = np.require(data, '=f4')
            stream.append(Trace(data=data, header=header))
    if not headonly:
        fh_data.close()
    return stream
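
As the docstring notes, readQ is registered behind obspy.core.stream.read, so the extra keyword arguments (data_directory, byteorder) are simply passed through read. A minimal usage sketch with illustrative paths, for a QBN file stored away from its QHD header and an old header without a BYTEORDER entry:

from obspy.core import read

# force big-endian data and point to a separate QBN directory
st = read('/path/to/QFILE-TEST.QHD', format='Q',
          data_directory='/path/to/qbn_files', byteorder='>')
print(st)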