Example #1
 def test_sacpaz_from_dataless(self):
     # The following dictionary is extracted from a datalessSEED
     # file
     pazdict = {'sensitivity': 2516580000.0,
                'digitizer_gain': 1677720.0, 'seismometer_gain': 1500.0,
                'zeros': [0j, 0j], 'gain': 59198800.0,
                'poles': [(-0.037010000000000001 + 0.037010000000000001j),
                          (-0.037010000000000001 - 0.037010000000000001j),
                          (-131 + 467.30000000000001j),
                          (-131 - 467.30000000000001j),
                          (-251.30000000000001 + 0j)]}
     tr = Trace()
     # This file was extracted from the datalessSEED file using rdseed
     pazfile = os.path.join(os.path.dirname(__file__),
                            'data', 'SAC_PZs_NZ_HHZ_10')
     attach_paz(tr, pazfile, todisp=False)
     sacconstant = pazdict['digitizer_gain'] * \
         pazdict['seismometer_gain'] * pazdict['gain']
     np.testing.assert_almost_equal(tr.stats.paz['gain'] / 1e17,
                                    sacconstant / 1e17, decimal=6)
     # pole-zero files according to the SAC convention are in displacement
     self.assertEqual(len(tr.stats.paz['zeros']), 3)
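A minimal sketch of how `attach_paz` is used outside a test, assuming the `obspy.io.sac.sacpz` import path of current ObsPy and a hypothetical local pole-zero file:

from obspy import Trace
from obspy.io.sac.sacpz import attach_paz

tr = Trace()
# 'SAC_PZs_NZ_HHZ_10' is a hypothetical path to an rdseed-style pole-zero file
attach_paz(tr, 'SAC_PZs_NZ_HHZ_10', todisp=False)
print(tr.stats.paz['gain'], len(tr.stats.paz['zeros']))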
Example #2
 def test_always_sac_reftime(self):
     """
     Writing a SAC file from a .stats.sac with no reference time should
     still write a SAC file with a reference time.
     """
     reftime = UTCDateTime('2010001')
     a = 12.34
     b = 0.0
     tr = Trace(np.zeros(1000))
     tr.stats.delta = 0.01
     tr.stats.station = 'XXX'
     tr.stats.starttime = reftime
     tr.stats.sac = {}
     tr.stats.sac['a'] = a
     tr.stats.sac['b'] = b
     with io.BytesIO() as tf:
         tr.write(tf, format='SAC')
         tf.seek(0)
         tr1 = read(tf)[0]
     self.assertEqual(tr1.stats.starttime, reftime)
     self.assertAlmostEqual(tr1.stats.sac.a, a, places=5)
     self.assertEqual(tr1.stats.sac.b, b)
Example #3
 def test_writeSmallTrace(self):
     """
     Tests writing Traces containing 0 to 3 samples only.
     """
     for format in ['SH_ASC', 'Q']:
         for num in range(0, 4):
             tr = Trace(data=np.arange(num))
             with NamedTemporaryFile() as tf:
                 tempfile = tf.name
                 if format == 'Q':
                     tempfile += '.QHD'
                 tr.write(tempfile, format=format)
                 # test results
                 with warnings.catch_warnings() as _:  # NOQA
                     warnings.simplefilter("ignore")
                     st = read(tempfile, format=format)
                 self.assertEqual(len(st), 1)
                 self.assertEqual(len(st[0]), num)
                 # Q files consist of two files - deleting additional file
                 if format == 'Q':
                     os.remove(tempfile[:-4] + '.QBN')
                     os.remove(tempfile[:-4] + '.QHD')
Example #4
 def test_read_and_write_via_obspy(self):
     """
     Read and Write files via obspy.core.Trace
     """
     testdata = np.array(
         [111, 111, 111, 111, 111, 109, 106, 103, 103, 110, 121, 132, 139])
     with NamedTemporaryFile() as fh:
         testfile = fh.name
         self.file = os.path.join(self.path, '3cssan.reg.8.1.RNON.wav')
         tr = read(self.file, format='WAV')[0]
         self.assertEqual(tr.stats.npts, 10599)
         self.assertEqual(tr.stats['sampling_rate'], 7000)
         np.testing.assert_array_equal(tr.data[:13], testdata)
         # write
         st2 = Stream()
         st2.traces.append(Trace())
         st2[0].data = tr.data.copy()  # copy the data
         st2.write(testfile, format='WAV', framerate=7000)
         # read without giving the WAV format option
         tr3 = read(testfile)[0]
         self.assertEqual(tr3.stats, tr.stats)
         np.testing.assert_array_equal(tr3.data[:13], testdata)
Example #5
 def test_read_and_write_via_obspy(self):
     """
     Read and Write files via L{obspy.Trace}
     """
     gse2file = os.path.join(self.path, 'data', 'loc_RNON20040609200559.z')
     # read trace
     st1 = read(gse2file)
     st1.verify()
     tr1 = st1[0]
     # write comparison trace
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st2 = Stream()
         st2.traces.append(Trace())
         tr2 = st2[0]
         tr2.data = copy.deepcopy(tr1.data)
         tr2.stats = copy.deepcopy(tr1.stats)
         # raises "UserWarning: Bad value in GSE2 header field"
         with warnings.catch_warnings():
             warnings.simplefilter('ignore', UserWarning)
             st2.write(tempfile, format='GSE2')
         # read comparison trace
         st3 = read(tempfile)
     st3.verify()
     tr3 = st3[0]
     # check if equal
     self.assertEqual(tr3.stats['station'], tr1.stats['station'])
     self.assertEqual(tr3.stats.npts, tr1.stats.npts)
     self.assertEqual(tr3.stats['sampling_rate'],
                      tr1.stats['sampling_rate'])
     self.assertEqual(tr3.stats.get('channel'), tr1.stats.get('channel'))
     self.assertEqual(tr3.stats.get('starttime'),
                      tr1.stats.get('starttime'))
     self.assertEqual(tr3.stats.get('calib'), tr1.stats.get('calib'))
     self.assertEqual(tr3.stats.gse2.get('vang'),
                      tr1.stats.gse2.get('vang'))
     self.assertEqual(tr3.stats.gse2.get('calper'),
                      tr1.stats.gse2.get('calper'))
     np.testing.assert_equal(tr3.data, tr1.data)
Example #6
def _convert_adj_to_trace(adj):
    """
    Convert AdjointSource to Trace, for internal use only.
    """
    meta = {}

    tr = Trace()
    tr.data = adj.adjoint_source
    tr.stats.starttime = adj.starttime
    tr.stats.delta = adj.dt

    tr.stats.channel = adj.component
    tr.stats.station = adj.station
    tr.stats.network = adj.network
    tr.stats.location = adj.location

    meta["adj_src_type"] = adj.adj_src_type
    meta["misfit"] = adj.misfit
    meta["min_period"] = adj.min_period
    meta["max_period"] = adj.max_period

    return tr, meta
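A hedged usage sketch for `_convert_adj_to_trace`: the function only reads attributes, so a `SimpleNamespace` stand-in (all values hypothetical) is enough to exercise it, assuming the function above is in scope:

from types import SimpleNamespace
import numpy as np
from obspy import UTCDateTime

# hypothetical stand-in for a pyadjoint AdjointSource
adj = SimpleNamespace(
    adjoint_source=np.zeros(100), starttime=UTCDateTime(0), dt=0.05,
    component="BHZ", station="ANMO", network="IU", location="00",
    adj_src_type="multitaper_misfit", misfit=0.0,
    min_period=20.0, max_period=100.0)
tr, meta = _convert_adj_to_trace(adj)
assert tr.stats.npts == 100 and meta["misfit"] == 0.0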
Example #7
 def test_trimFloatingPointWithPadding2(self):
     """
     Use more complicated times and sampling rate.
     """
     tr = Trace(data=np.arange(111))
     tr.stats.starttime = UTCDateTime(111.11111)
     tr.stats.sampling_rate = 50.0
     org_stats = deepcopy(tr.stats)
     org_data = deepcopy(tr.data)
     # Save memory position of array.
     mem_pos = tr.data.ctypes.data
     # Create temp trace object used for testing.
     temp = deepcopy(tr)
     temp.trim(UTCDateTime(111.22222),
               UTCDateTime(112.99999),
               nearest_sample=False)
     # Should again be identical.  # XXX not
     temp2 = deepcopy(tr)
     temp2.trim(UTCDateTime(111.21111),
                UTCDateTime(113.01111),
                nearest_sample=False)
     np.testing.assert_array_equal(temp.data, temp2.data[1:-1])
     # Check stuff.
     self.assertEqual(temp.stats.starttime, UTCDateTime(111.23111))
     self.assertEqual(temp.stats.endtime, UTCDateTime(112.991110))
     # Check if the data is the same.
     temp = deepcopy(tr)
     temp.trim(UTCDateTime(0), UTCDateTime(1000 * 1000), pad=True)
     self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
     # starttime must be in conformance with sampling rate
     t = UTCDateTime(1969, 12, 31, 23, 59, 59, 991110)
     self.assertEqual(temp.stats.starttime, t)
     delta = int((tr.stats.starttime - t) * tr.stats.sampling_rate + .5)
     np.testing.assert_array_equal(tr.data, temp.data[delta:delta + 111])
     # Make sure the original Trace object did not change.
     np.testing.assert_array_equal(tr.data, org_data)
     self.assertEqual(tr.data.ctypes.data, mem_pos)
     self.assertEqual(tr.stats, org_stats)
Example #8
 def test_readAndWriteViaObsPy(self):
     """
     Read and Write files via L{obspy.Trace}
     """
     gse2file = os.path.join(self.path, 'data', 'loc_RNON20040609200559.z')
     # read trace
     st1 = read(gse2file)
     st1.verify()
     tr1 = st1[0]
     # write comparison trace
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st2 = Stream()
         st2.traces.append(Trace())
         tr2 = st2[0]
         tr2.data = copy.deepcopy(tr1.data)
         tr2.stats = copy.deepcopy(tr1.stats)
         st2.write(tempfile, format='GSE2')
         # read comparison trace
         st3 = read(tempfile)
     st3.verify()
     tr3 = st3[0]
     # check if equal
     self.assertEqual(tr3.stats['station'], tr1.stats['station'])
     self.assertEqual(tr3.stats.npts, tr1.stats.npts)
     self.assertEqual(tr3.stats['sampling_rate'],
                      tr1.stats['sampling_rate'])
     self.assertEqual(tr3.stats.get('channel'),
                      tr1.stats.get('channel'))
     self.assertEqual(tr3.stats.get('starttime'),
                      tr1.stats.get('starttime'))
     self.assertEqual(tr3.stats.get('calib'),
                      tr1.stats.get('calib'))
     self.assertEqual(tr3.stats.gse2.get('vang'),
                      tr1.stats.gse2.get('vang'))
     self.assertEqual(tr3.stats.gse2.get('calper'),
                      tr1.stats.gse2.get('calper'))
     np.testing.assert_equal(tr3.data, tr1.data)
Example #9
def _read_dmx(filename, **kwargs):
    station = kwargs.get("station", None)

    traces = []
    with open(filename, "rb") as fid:
        content = fid.read()

    with SpooledTemporaryFile(mode='w+b') as fid:
        fid.write(content)
        fid.seek(0)

        while fid.read(12):  # we require at least 1 full structtag
            fid.seek(-12, 1)
            structtag = readstructtag(fid)
            if structtag.id_struct == 7:
                descripttrace = readdescripttrace(fid)
                if station is None or descripttrace.st_name.strip() == station:
                    data = readdata(fid, descripttrace.length,
                                    descripttrace.datatype)
                    tr = Trace(data=np.asarray(data))
                    tr.stats.network = descripttrace.network.strip()
                    tr.stats.station = descripttrace.st_name.strip()
                    tr.stats.channel = descripttrace.component
                    tr.stats.sampling_rate = descripttrace.rate
                    tr.stats.starttime = UTCDateTime(descripttrace.begintime)
                    tr.stats.dmx = AttribDict({
                        "descripttrace": descripttrace,
                        "structtag": structtag
                    })
                    traces.append(tr)
                else:
                    fid.seek(int(structtag.len_data), 1)
            else:
                fid.seek(
                    int(structtag.len_struct) + int(structtag.len_data), 1)

    st = Stream(traces=traces)
    return st
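Since `_read_dmx` has the shape of an ObsPy reader plugin, recent ObsPy versions expose the DMX format through `obspy.read`; a hedged sketch with a hypothetical file name and station code:

from obspy import read

# the optional station kwarg is forwarded to _read_dmx
st = read("example.dmx", format="DMX", station="EMFO")
print(st)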
Example #10
    def test_writeAndReadDifferentEncodings(self):
        """
        Writes and reads a file with different encodings via the obspy.core
        methods.
        """
        npts = 1000
        np.random.seed(815)  # make test reproducible
        data = np.random.randn(npts).astype(np.float64) * 1e3 + .5
        st = Stream([Trace(data=data)])
        # Loop over the encodings.
        for encoding, value in ENCODINGS.items():
            # Skip encodings that cannot be written.
            if not value[3]:
                continue

            seed_dtype = value[2]
            # Special handling for the ASCII dtype. NumPy 1.7 changes the
            # default dtype of numpy.string_ from "|S1" to "|S32". Enforce
            # "|S1|" here to be consistent across NumPy versions.
            if encoding == 0:
                seed_dtype = native_str("|S1")
            with NamedTemporaryFile() as tf:
                tempfile = tf.name
                # Write it once with the encoding key and once with the value.
                st[0].data = data.astype(seed_dtype)
                st.verify()
                st.write(tempfile, format="MSEED", encoding=encoding)
                st2 = read(tempfile)
                del st2[0].stats.mseed
                np.testing.assert_array_equal(
                    st[0].data, st2[0].data,
                    "Arrays are not equal for encoding '%s'" %
                    ENCODINGS[encoding][0])
                del st2
                ms = _MSStruct(tempfile)
                ms.read(-1, 1, 1, 0)
                self.assertEqual(ms.msr.contents.encoding, encoding)
                del ms  # for valgrind
Example #11
def gauge2sac(gauge_file, dictionary, xyfile, outdir, time_epi, dt):
    '''
    Convert output from the fort.gauge file into individual SAC files
    '''
    from numpy import genfromtxt, unique, where, arange, interp
    from obspy import Stream, Trace
    from obspy.core.util.attribdict import AttribDict

    #Read gauge file
    gauges = genfromtxt(gauge_file)
    #Read names
    plume_name = genfromtxt(dictionary, usecols=0, dtype='S')
    claw_name = genfromtxt(dictionary, usecols=1)
    lat = genfromtxt(xyfile, usecols=2)
    lon = genfromtxt(xyfile, usecols=3)
    #Find unique stations
    gauge_list = unique(gauges[:, 0])
    for k in range(len(gauge_list)):
        print(k)
        st = Stream(Trace())
        i = where(gauges[:, 0] == gauge_list[k])[0]
        data = gauges[i, 6]
        time = gauges[i, 2]
        ti = arange(0, time.max(), dt)
        tsunami = interp(ti, time, data)
        st[0].data = tsunami
        st[0].stats.starttime = time_epi
        st[0].stats.delta = dt
        iname = where(claw_name == gauge_list[k])[0][0]
        st[0].stats.station = plume_name[iname]
        sac = AttribDict()
        sac.stla = lat[iname]
        sac.stlo = lon[iname]
        sac.evla = 46.607
        sac.evlo = 153.230
        #sac.iztype='IO'
        st[0].stats['sac'] = sac
        st.write(outdir + '/' + plume_name[iname] + '.tsun.sac', format='SAC')
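A hedged call sketch for `gauge2sac`; all paths are hypothetical and the inputs must follow the GeoClaw fort.gauge layout the function assumes:

from obspy import UTCDateTime

gauge2sac(gauge_file='run1/fort.gauge', dictionary='gauges.dict',
          xyfile='gauges.xy', outdir='sac_out',
          time_epi=UTCDateTime('2006-11-15T11:14:13'), dt=5.0)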
Example #12
def SVD_sim(SP, lowcut, highcut, samp_rate,
            amp_range=np.arange(-10, 10, 0.01)):
    """
    Generate basis vectors of a set of simulated seismograms.

    Inputs should have a range of S-P amplitude ratios, in theory to simulate \
    a range of focal mechanisms.

    :type SP: int
    :param SP: S-P time in seconds - will be converted to samples according \
        to samp_rate.
    :type lowcut: float
    :param lowcut: Low-cut for bandpass filter in Hz
    :type highcut: float
    :param highcut: High-cut for bandpass filter in Hz
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz
    :type amp_range: np.ndarray
    :param amp_range: Amplitude ratio range to generate synthetics for.

    :returns: np.ndarray, set of output basis vectors
    """
    from obspy import Stream, Trace
    # Convert SP to samples
    SP = int(SP * samp_rate)
    # Scan through a range of amplitude ratios
    synthetics = [Stream(Trace(seis_sim(SP, a))) for a in amp_range]
    for st in synthetics:
        for tr in st:
            tr.stats.station = 'SYNTH'
            tr.stats.channel = 'SH1'
            tr.stats.sampling_rate = samp_rate
            tr.filter('bandpass', freqmin=lowcut, freqmax=highcut)
    # We have a list of obspy Trace objects, we can pass this to EQcorrscan's
    # SVD functions
    from utils import clustering
    V, s, U, stachans = clustering.SVD(synthetics)
    return V, s, U, stachans
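A short usage sketch under the function's own assumptions (`seis_sim` and `utils.clustering` must be importable); the parameter values are illustrative only:

import numpy as np

# 2 s S-P time at 100 Hz, band-passed 2-8 Hz, on a coarse amplitude grid
V, s, U, stachans = SVD_sim(SP=2, lowcut=2.0, highcut=8.0, samp_rate=100.0,
                            amp_range=np.arange(-2, 2, 0.5))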
Example #13
 def test_trim(self):
     """
     Tests the trim method of the Trace class.
     """
     # set up
     trace = Trace(data=np.arange(1001))
     start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
     trace.stats.starttime = start
     trace.stats.sampling_rate = 200.0
     end = UTCDateTime(2000, 1, 1, 0, 0, 5, 0)
     trace.verify()
     # trim 100 samples (0.5 s at 200 Hz) from each end
     trace.trim(0.5, 0.5)
     trace.verify()
     np.testing.assert_array_equal(trace.data[-5:],
                                   np.array([896, 897, 898, 899, 900]))
     np.testing.assert_array_equal(trace.data[:5],
                                   np.array([100, 101, 102, 103, 104]))
     self.assertEqual(len(trace.data), 801)
     self.assertEqual(trace.stats.npts, 801)
     self.assertEqual(trace.stats.sampling_rate, 200.0)
     self.assertEqual(trace.stats.starttime, start + 0.5)
     self.assertEqual(trace.stats.endtime, end - 0.5)
Example #14
def gauss_trace_group(gauss_stat_group) -> TraceGroup:
    """ Create a TraceGroup with a Gaussian pulse as the data """
    # Generate the data
    data = gauss(_t, _a, _b, _c)
    gauss_stat_group.data["sampling_rate"] = 1 / _dt
    # Build a stream from the data
    tr = Trace(
        data=data,
        header={
            "starttime": to_utc(gauss_stat_group.data.iloc[0].starttime),
            "delta": _dt,
            "network": "UK",
            "station": "STA1",
            "channel": "HHZ",
        },
    )
    st = Stream()
    st.append(tr)
    # Add a second trace with a substantial discontinuity caused by zero-padding
    st.append(tr.copy())  # Same data, but the time window in the StatsGroup halves it
    st[1].stats.station = "STA2"
    # Make a TraceGroup
    return mopy.TraceGroup(gauss_stat_group, st, "displacement").fillna()
Example #15
 def test_writingInvalidDataQuality(self):
     """
     Trying to write an invalid dataquality results in an error. Only D, R,
     Q and M are allowed.
     """
     data = np.zeros(10)
     # Create 4 different traces with 4 different dataqualities.
     stats1 = {
         'network': 'BW',
         'station': 'TEST',
         'location': 'A',
         'channel': 'EHE',
         'npts': len(data),
         'sampling_rate': 200.0,
         'mseed': {
             'dataquality': 'X'
         }
     }
     st = Stream([Trace(data=data, header=stats1)])
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         # Write it.
         self.assertRaises(ValueError, st.write, tempfile, format="MSEED")
Example #16
 def test_bug_write_read_float32_seed_win32(self):
     """
     Test case for issue #64.
     """
     # create stream object
     data = np.array([395.07809448, 395.0782, 1060.28112793, -1157.37487793,
                      -1236.56237793, 355.07028198, -1181.42175293],
                     dtype=np.float32)
     st = Stream([Trace(data=data)])
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         _write_mseed(st, tempfile, format="MSEED")
         # read temp file directly without libmseed
         with open(tempfile, 'rb') as fp:
             fp.seek(56)
             dtype = np.dtype(native_str('>f4'))
             bin_data = from_buffer(fp.read(7 * dtype.itemsize),
                                    dtype=dtype)
         np.testing.assert_array_equal(data, bin_data)
         # read via ObsPy
         st2 = _read_mseed(tempfile)
     # test results
     np.testing.assert_array_equal(data, st2[0].data)
Example #17
 def test_writing_micro_seconds(self):
     """
     Test case for #194. Check that microseconds are written to
     the SAC header field b.
     """
     np.random.seed(815)
     head = {
         'network': 'NL',
         'station': 'HGN',
         'channel': 'BHZ',
         'sampling_rate': 200.0,
         'starttime': UTCDateTime(2003, 5, 29, 2, 13, 22, 999999)
     }
     data = np.random.randint(0, 5000, 100).astype(np.int32)
     st = Stream([Trace(header=head, data=data)])
     # write them as SAC
     with NamedTemporaryFile() as tf:
         tmpfile = tf.name
         st.write(tmpfile, format="SAC")
         st2 = read(tmpfile, format="SAC")
     # check all the required entries (see the issue referenced in the docstring)
     self.assertEqual(st2[0].stats.starttime, st[0].stats.starttime)
     self.assertAlmostEqual(st2[0].stats.sac.b, 0.000999)
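The expected value of `b` follows from SAC storing the reference time only to millisecond precision (nzmsec), so the leftover microseconds end up in the begin-time offset; a sketch of that arithmetic, mirroring the assertion above:

from obspy import UTCDateTime

t = UTCDateTime(2003, 5, 29, 2, 13, 22, 999999)
# 999 whole milliseconds go into nzmsec; the remaining 999 us become b
expected_b = (t.microsecond % 1000) / 1e6
print(expected_b)  # 0.000999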
Example #18
def process_tide_gauge(fin, fout, station):
    from numpy import c_, genfromtxt, zeros
    from obspy import Stream, Trace, UTCDateTime

    time = c_[genfromtxt(fin, usecols=0, dtype='S'),
              genfromtxt(fin, usecols=1, dtype='S')]
    t1 = UTCDateTime(time[0, 0] + time[0, 1])
    #Make every time relative in seconds to t1
    t = zeros(len(time))
    for k in range(len(time)):
        t[k] = UTCDateTime(time[k, 0] + time[k, 1]) - t1

    #read data
    data = genfromtxt(fin, usecols=2)

    #metadata?
    dt = int(t[1] - t[0])

    #Put in sac file
    st = Stream(Trace())
    st[0].stats.station = station
    st[0].stats.delta = dt
    st[0].stats.starttime = t1
    st[0].data = data
    st.write(fout, format='SAC')
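A hedged call sketch for `process_tide_gauge`; the file name is hypothetical and must contain date and time strings in the first two columns and the gauge value in the third:

process_tide_gauge(fin='gauge_21413.txt', fout='21413.tsun.sac',
                   station='21413')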
Example #19
 def test_bugWriteReadFloat32SEEDWin32(self):
     """
     Test case for issue #64.
     """
     # create stream object
     data = np.array([395.07809448, 395.0782, 1060.28112793, -1157.37487793,
                      -1236.56237793, 355.07028198, -1181.42175293],
                     dtype=np.float32)
     st = Stream([Trace(data=data)])
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         writeMSEED(st, tempfile, format="MSEED")
         # read temp file directly without libmseed
         with open(tempfile, 'rb') as fp:
             fp.seek(56)
             bin_data = np.fromfile(fp, dtype='>f4', count=7)
         np.testing.assert_array_equal(data, bin_data)
         # read via ObsPy
         st2 = readMSEED(tempfile)
     # test results
     np.testing.assert_array_equal(data, st2[0].data)
Example #20
def svd_to_stream(uvectors, stachans, k, sampling_rate):
    """
    Convert the singular vectors output by SVD to streams.

    One stream will be generated for each singular vector level,
    for all channels.  Useful for plotting, and aiding seismologists thinking
    of waveforms!

    :type uvectors: list
    :param uvectors: List of :class:`numpy.ndarray` singular vectors
    :type stachans: list
    :param stachans: List of station.channel Strings
    :type k: int
    :param k: Number of streams to return = number of SV's to include
    :type sampling_rate: float
    :param sampling_rate: Sampling rate in Hz

    :returns:
        svstreams, List of :class:`obspy.core.stream.Stream`, with
        svStreams[0] being composed of the highest rank singular vectors.
    """
    svstreams = []
    for i in range(k):
        svstream = []
        for j, stachan in enumerate(stachans):
            if len(uvectors[j]) <= k:
                warnings.warn('Too few traces at %s for a %02d dimensional '
                              'subspace. Detector streams will not include '
                              'this channel.' % ('.'.join((stachan[0],
                                                           stachan[1])), k))
            else:
                svstream.append(Trace(uvectors[j][i],
                                      header={'station': stachan[0],
                                              'channel': stachan[1],
                                              'sampling_rate': sampling_rate}))
        svstreams.append(Stream(svstream))
    return svstreams
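A hedged usage sketch for `svd_to_stream`, faking the SVD output with `numpy.linalg.svd` (station and channel names hypothetical), assuming the function and its `Trace`/`Stream`/`warnings` module imports are in scope:

import numpy as np

rng = np.random.default_rng(42)
# right-singular vectors for two channels, 4 vectors of 200 samples each
uvectors = [np.linalg.svd(rng.standard_normal((4, 200)),
                          full_matrices=False)[2] for _ in range(2)]
stachans = [('SYN', 'SHZ'), ('SYN', 'SHN')]
streams = svd_to_stream(uvectors, stachans, k=2, sampling_rate=100.0)
print(streams[0])  # highest-rank singular vectors, one trace per channel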
Example #21
 def test_decimate(self):
     """
     Tests the decimate method of the Trace object.
     """
     # create test Trace
     tr = Trace(data=np.arange(20))
     tr_bkp = deepcopy(tr)
     # some test that should fail and leave the original trace alone
     self.assertRaises(ValueError, tr.decimate, 7, strict_length=True)
     self.assertRaises(ValueError, tr.decimate, 9, strict_length=True)
     self.assertRaises(ArithmeticError, tr.decimate, 18)
     # some tests in place
     tr.decimate(4, no_filter=True)
     np.testing.assert_array_equal(tr.data, np.arange(0, 20, 4))
     self.assertEqual(tr.stats.npts, 5)
     self.assertEqual(tr.stats.sampling_rate, 0.25)
     self.assertEqual(tr.stats.processing,
                      ["downsample:integerDecimation:4"])
     tr = tr_bkp.copy()
     tr.decimate(10, no_filter=True)
     np.testing.assert_array_equal(tr.data, np.arange(0, 20, 10))
     self.assertEqual(tr.stats.npts, 2)
     self.assertEqual(tr.stats.sampling_rate, 0.1)
     self.assertEqual(tr.stats.processing,
                      ["downsample:integerDecimation:10"])
     # some tests with automatic prefiltering
     tr = tr_bkp.copy()
     tr2 = tr_bkp.copy()
     tr.decimate(4)
     df = tr2.stats.sampling_rate
     tr2.data, fp = lowpassCheby2(data=tr2.data, freq=df * 0.5 / 4.0,
                                  df=df, maxorder=12, ba=False,
                                  freq_passband=True)
     # check that iteratively determined pass band frequency is correct
     self.assertAlmostEqual(0.0811378285461, fp, places=7)
     tr2.decimate(4, no_filter=True)
     np.testing.assert_array_equal(tr.data, tr2.data)
Example #22
    def test_writing_blockette_100(self):
        """
        Tests that blockette 100 is written correctly. It is only used if
        the sampling rate is higher than 32767 Hz or smaller than 1.0 /
        32767.0 Hz.
        """
        # Three traces, only the middle one needs it.
        tr = Trace(data=np.linspace(0, 100, 101))
        st = Stream(traces=[tr.copy(), tr.copy(), tr.copy()])

        st[1].stats.sampling_rate = 60000.0

        with io.BytesIO() as buf:
            st.write(buf, format="mseed")
            buf.seek(0, 0)
            st2 = read(buf)

        self.assertTrue(
            np.allclose(st[0].stats.sampling_rate, st2[0].stats.sampling_rate))
        self.assertTrue(
            np.allclose(st[1].stats.sampling_rate, st2[1].stats.sampling_rate))
        self.assertTrue(
            np.allclose(st[2].stats.sampling_rate, st2[2].stats.sampling_rate))

        st[1].stats.sampling_rate = 1.0 / 60000.0

        with io.BytesIO() as buf:
            st.write(buf, format="mseed")
            buf.seek(0, 0)
            st2 = read(buf)

        self.assertTrue(
            np.allclose(st[0].stats.sampling_rate, st2[0].stats.sampling_rate))
        self.assertTrue(
            np.allclose(st[1].stats.sampling_rate, st2[1].stats.sampling_rate))
        self.assertTrue(
            np.allclose(st[2].stats.sampling_rate, st2[2].stats.sampling_rate))
Example #23
def get_dropouts(stations, working_dir, net):
    '''
    Loop over all sites, get dropouts, and write them to an mseed file.
    '''

    from obspy import Stream, Trace

    #Summary file
    f = open(working_dir + '_drops.summary', 'w')
    f.write('# Station, samples streamed, samples dropped, % received\n')

    for k in range(len(stations)):

        station_file = working_dir + stations[k] + '.LXE.mseed'

        try:
            t0, drops, Nsamples = dropouts(station_file)
            st = Stream(Trace())
            st[0].data = drops
            st[0].stats.starttime = t0
            st[0].stats.delta = 1.0
            st[0].stats.station = stations[k]
            st[0].stats.network = net
            st[0].stats.channel = 'ZXD'
            out_file = (working_dir + stations[k] + '.' +
                        st[0].stats.channel + '.mseed')
            st[0].write(out_file, format='MSEED')

            #add to summary file
            line = '%s\t%d\t%d\t%.1f\n' % (stations[k], Nsamples, len(drops),
                                           100 - 100 * len(drops) / Nsamples)
            f.write(line)
        except Exception:
            print('... no data for ' + station_file)

    f.close()
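A hedged call sketch for `get_dropouts`; it assumes the companion `dropouts` helper is importable and that per-station `<station>.LXE.mseed` files exist under `working_dir` (all names hypothetical):

get_dropouts(stations=['P157', 'P160'], working_dir='/data/drops/', net='CI')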
Example #24
def read_smc_file(acc_file):
    # reads the "smc" file (synthetic accelerogram generated by smsim)
    # outputs the displacement as an ObsPy Trace object in SI units (meters)
    from numpy import array
    from obspy import Trace

    f = open(acc_file, 'r')
    lines = f.readlines()[40:]
    f.close()

    acc = []
    for ln in lines:
        for i in range(0, int(len(ln) / 10)):
            acc.append(float(ln[10 * i:10 * (i + 1)]))

    acc = array(acc)
    l = len(acc) * 8
    #acc = reshape(acc, (1, l))[0]

    acc_tr = Trace()
    acc_tr.data = acc / 100.
    acc_tr.stats.sampling_rate = 100.
    acc_tr.stats.delta = 0.01

    disp_trace = acc_tr.copy().integrate().integrate()
    return disp_trace
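A hedged usage sketch for `read_smc_file`; the file name is hypothetical and must follow the smsim layout the function assumes (40 header lines, then fixed 10-character columns of acceleration in cm/s^2):

disp = read_smc_file('synthetic.smc')
print(disp.stats.npts, abs(disp.data).max())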
Example #25
def _internal_get_sample_data():
    """
    Returns some real data (trace and poles and zeroes) for PPSD testing.

    Data was downsampled to 100 Hz, so the PPSD is a bit distorted, which does
    not matter for the purpose of testing.
    """
    # load test file
    file_data = os.path.join(
        PATH, 'BW.KW1._.EHZ.D.2011.090_downsampled.asc.gz')
    # parameters for the test
    with gzip.open(file_data) as f:
        data = np.loadtxt(f)
    stats = {'_format': 'MSEED',
             'calib': 1.0,
             'channel': 'EHZ',
             'delta': 0.01,
             'endtime': UTCDateTime(2011, 3, 31, 2, 36, 0, 180000),
             'location': '',
             'mseed': {'dataquality': 'D', 'record_length': 512,
                       'encoding': 'STEIM2', 'byteorder': '>'},
             'network': 'BW',
             'npts': 936001,
             'sampling_rate': 100.0,
             'starttime': UTCDateTime(2011, 3, 31, 0, 0, 0, 180000),
             'station': 'KW1'}
    tr = Trace(data, stats)

    paz = {'gain': 60077000.0,
           'poles': [(-0.037004 + 0.037016j), (-0.037004 - 0.037016j),
                     (-251.33 + 0j), (-131.04 - 467.29j),
                     (-131.04 + 467.29j)],
           'sensitivity': 2516778400.0,
           'zeros': [0j, 0j]}

    return tr, paz
Example #26
def _internal_process(st, lowcut, highcut, filt_order, sampling_rate,
                      first_length, stachan, debug, i=0):
    tr = st.select(station=stachan[0], channel=stachan[1])
    if len(tr) == 0:
        tr = Trace(np.zeros(int(first_length * sampling_rate)))
        tr.stats.station = stachan[0]
        tr.stats.channel = stachan[1]
        tr.stats.sampling_rate = sampling_rate
        tr.stats.starttime = st[0].stats.starttime  # Do this to make more
        # sensible plots
        warnings.warn('Padding stream with zero trace for ' +
                      'station ' + stachan[0] + '.' + stachan[1])
    elif len(tr) == 1:
        tr = tr[0]
        tr.detrend('simple')
        tr = pre_processing.process(tr=tr, lowcut=lowcut, highcut=highcut,
                                    filt_order=filt_order,
                                    samp_rate=sampling_rate, debug=debug,
                                    seisan_chan_names=False)
    else:
        msg = ('Multiple channels for ' + stachan[0] + '.' +
               stachan[1] + ' in a single design stream.')
        raise IOError(msg)
    return i, tr
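A hedged sketch of `_internal_process` exercising only the zero-padding branch (the other branch needs EQcorrscan's `pre_processing` module, and the function's own module must import `warnings` and `Trace`); station and channel names are illustrative:

import numpy as np
from obspy import Stream, Trace

st = Stream([Trace(np.zeros(1000),
                   header={'station': 'HAVE', 'channel': 'SHZ',
                           'sampling_rate': 100.0})])
# 'MISS.SHZ' is absent, so a zero trace of first_length seconds is returned
i, tr = _internal_process(st, lowcut=2.0, highcut=8.0, filt_order=4,
                          sampling_rate=100.0, first_length=10.0,
                          stachan=('MISS', 'SHZ'), debug=0)
assert tr.stats.npts == 1000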
Example #27
 def test_Header(self):
     """
     Tests whether the header is correctly written and read.
     """
     np.random.seed(815)  # make test reproducible
     data = np.random.randint(-1000, 1000, 50).astype('int32')
     stats = {
         'network': 'BW',
         'station': 'TEST',
         'location': 'A',
         'channel': 'EHE',
         'npts': len(data),
         'sampling_rate': 200.0,
         'mseed': {
             'record_length': 512,
             'encoding': 'STEIM2',
             'filesize': 512,
             'dataquality': 'D',
             'number_of_records': 1,
             'byteorder': '>'
         }
     }
     stats['starttime'] = UTCDateTime(2000, 1, 1)
     st = Stream([Trace(data=data, header=stats)])
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         # Write it.
         st.write(tempfile, format="MSEED")
         # Read it again and delete the temporary file.
         stream = read(tempfile)
     stream.verify()
     # Loop over the attributes to be able to assert them because a
     # dictionary is not a stats dictionary.
     # This also assures that there are no additional keys.
     for key in stats.keys():
         self.assertEqual(stats[key], stream[0].stats[key])
Example #28
 def test_spectogram(self):
     """
     Create spectrogram plotting examples in tests/output directory.
     """
     # Create dynamic test_files to avoid dependencies of other modules.
     # set specific seed value such that random numbers are reproducible
     np.random.seed(815)
     head = {
         'network': 'BW',
         'station': 'BGLD',
         'starttime': UTCDateTime(2007, 12, 31, 23, 59, 59, 915000),
         'sampling_rate': 200.0,
         'channel': 'EHE'
     }
     tr = Trace(data=np.random.randint(0, 1000, 824), header=head)
     st = Stream([tr])
     # 1 - using log=True
     with NamedTemporaryFile(suffix='.png') as tf:
         spectrogram.spectrogram(st[0].data,
                                 log=True,
                                 outfile=tf.name,
                                 samp_rate=st[0].stats.sampling_rate,
                                 show=False)
         # compare images
         expected_image = os.path.join(self.path, 'spectogram_log.png')
         compare_images(tf.name, expected_image, 0.001)
     # 2 - using log=False
     with NamedTemporaryFile(suffix='.png') as tf:
         spectrogram.spectrogram(st[0].data,
                                 log=False,
                                 outfile=tf.name,
                                 samp_rate=st[0].stats.sampling_rate,
                                 show=False)
         # compare images
         expected_image = os.path.join(self.path, 'spectogram.png')
         compare_images(tf.name, expected_image, 0.001)
Example #29
 def test_trimFloatingPointWithPadding1(self):
     """
     Tests the slicing of trace objects with the use of the padding option.
     """
     # Create test array that allows for easy testing.
     tr = Trace(data=np.arange(11))
     org_stats = deepcopy(tr.stats)
     org_data = deepcopy(tr.data)
     # Save memory position of array.
     mem_pos = tr.data.ctypes.data
     # Just some sanity tests.
     self.assertEqual(tr.stats.starttime, UTCDateTime(0))
     self.assertEqual(tr.stats.endtime, UTCDateTime(10))
     # Create temp trace object used for testing.
     st = tr.stats.starttime
     # Using out of bounds times should not do anything but create
     # a copy of the stats.
     temp = deepcopy(tr)
     temp.trim(st - 2.5, st + 200, pad=True)
     self.assertEqual(temp.stats.starttime.timestamp, -2.0)
     self.assertEqual(temp.stats.endtime.timestamp, 200)
     self.assertEqual(temp.stats.npts, 203)
     mask = np.zeros(203).astype("bool")
     mask[:2] = True
     mask[13:] = True
     np.testing.assert_array_equal(temp.data.mask, mask)
     # Alter the new stats to make sure the old one stays intact.
     temp.stats.starttime = UTCDateTime(1000)
     self.assertEqual(org_stats, tr.stats)
     # Check that the data address is not the same, i.e. it is a copy
     self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
     np.testing.assert_array_equal(tr.data, temp.data[2:13])
     # Make sure the original Trace object did not change.
     np.testing.assert_array_equal(tr.data, org_data)
     self.assertEqual(tr.data.ctypes.data, mem_pos)
     self.assertEqual(tr.stats, org_stats)
Example #30
def create_stream_from_baikal_file(bf, use_coefs=False):
    """ получить stream (XX-5) """
    # header
    header = bf.MainHeader
    # datetime
    date = datetime.datetime(header["year"], header["month"], header["day"])
    delta = datetime.timedelta(seconds=header["to"])
    dt = date + delta
    # make utc datetime
    utcdatetime = UTCDateTime(dt, precision=3)
    # channel names in the output file (N, E, Z); coarse channels are dropped
    data_traces = bf.traces.astype(np.float64)[:3]
    # multiply by the calibration coefficients
    if use_coefs:
        for _i, ch_header in enumerate(bf.ChannelHeaders):
            koef = ch_header['koef_chan']
            data_traces[_i] = data_traces[_i] * koef
    traces = []
    for channel, data in zip(CHANNELS, data_traces):
        # prepare the header
        stats = DEFAULT_STATS.copy()
        stats.update({
            "station": header['station'].upper()[:3],
            'channel': channel,
            'sampling_rate': round(1. / header["dt"]),
            "delta": header["dt"],
            "npts": data.size,
            'starttime': utcdatetime,
        })
        # create the trace
        trace = Trace(data=data, header=stats)
        # collect the traces
        traces.append(trace)
    # create Stream
    stream = Stream(traces)
    return stream
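A heavily hedged sketch for `create_stream_from_baikal_file`: it requires a reader object exposing `MainHeader`, `ChannelHeaders` and a `traces` array, plus `CHANNELS` and `DEFAULT_STATS` module globals; the `BaikalFile` class and file name below are hypothetical:

# hypothetical Baikal (XX-5) reader from the same module
bf = BaikalFile('event_20110311.00')
st = create_stream_from_baikal_file(bf, use_coefs=True)
print(st)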