Example #1
    def test_read_invalid_filename(self):
        """
        Tests that we get a sane error message when calling read()
        with a filename that doesn't exist
        """
        doesnt_exist = 'dsfhjkfs'
        for i in range(10):
            if os.path.exists(doesnt_exist):
                doesnt_exist += doesnt_exist
                continue
            break
        else:
            self.fail('unable to get invalid file path')
        doesnt_exist = native_str(doesnt_exist)

        if PY2:
            exception_type = getattr(builtins, 'IOError')
        else:
            exception_type = getattr(builtins, 'FileNotFoundError')
        exception_msg = "[Errno 2] No such file or directory: '{}'"

        formats = _get_entry_points(
            'obspy.plugin.catalog', 'readFormat').keys()
        # try read() with an invalid filename for all registered read
        # plugins and also for filetype autodiscovery
        formats = [None] + list(formats)
        for format in formats:
            with self.assertRaises(exception_type) as e:
                read(doesnt_exist, format=format)
            self.assertEqual(
                str(e.exception), exception_msg.format(doesnt_exist))
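A standalone sketch of the behaviour this test exercises, written with pytest instead of unittest (the filename below is a deliberate non-path):

import pytest
from obspy import read

with pytest.raises(FileNotFoundError):
    read('this_file_does_not_exist', format='MSEED')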
Example #2
def su_specfem3d_obspy(prefix='SEM', channel=None, suffix='', byteorder='<', verbose=False):
    """ Reads Seismic Unix files produced by SPECFEM3D
    """
    import glob as _glob
    from os.path import basename
    from obspy import read

    if channel in ['x']:
        wildcard = '%s/*_dx_SU%s' % (prefix, suffix)
    elif channel in ['y']:
        wildcard = '%s/*_dy_SU%s' % (prefix, suffix)
    elif channel in ['z']:
        wildcard = '%s/*_dz_SU%s' % (prefix, suffix)
    elif channel in ['p']:
        wildcard = '%s/*_dp_SU%s' % (prefix, suffix)
    else:
        raise ValueError('CHANNEL must be one of the following: x y z p')

    # sort files by the numeric index at the start of their basename
    sort_by = lambda x: int(basename(x).split('_')[0])
    filenames = sorted(_glob.glob(wildcard), key=sort_by)

    # honor the byteorder argument instead of hard-coding little-endian
    streamobj = read(filenames.pop(), format='SU', byteorder=byteorder)
    for filename in filenames:
        streamobj += read(filename, format='SU', byteorder=byteorder)

    return streamobj
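A minimal usage sketch, assuming SPECFEM3D Seismic Unix output sits in a local SEM/ directory (the path is hypothetical):

st = su_specfem3d_obspy(prefix='SEM', channel='z')
print(st)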
Example #3
    def test_full_seed_with_non_default_dataquality(self):
        """
        Tests the reading of full SEED files with dataqualities other than D.
        """
        # Test the normal one first.
        filename = os.path.join(self.path, 'data', 'fullseed.mseed')
        st = read(filename)
        self.assertEqual(st[0].stats.mseed.dataquality, "D")

        # Test the others. They should also have identical data.
        filename = os.path.join(self.path, 'data',
                                'fullseed_dataquality_M.mseed')
        st = read(filename)
        data_m = st[0].data
        self.assertEqual(len(st), 1)
        self.assertEqual(st[0].stats.mseed.dataquality, "M")

        filename = os.path.join(self.path, 'data',
                                'fullseed_dataquality_R.mseed')
        st = read(filename)
        data_r = st[0].data
        self.assertEqual(len(st), 1)
        self.assertEqual(st[0].stats.mseed.dataquality, "R")

        filename = os.path.join(self.path, 'data',
                                'fullseed_dataquality_Q.mseed')
        st = read(filename)
        data_q = st[0].data
        self.assertEqual(len(st), 1)
        self.assertEqual(st[0].stats.mseed.dataquality, "Q")

        # Assert that the data is the same.
        np.testing.assert_array_equal(data_m, data_r)
        np.testing.assert_array_equal(data_m, data_q)
Example #4
    def test_saving_directly_to_file(self):
        # Save to a filename.
        with NamedTemporaryFile() as tf:
            filename = tf.name
            st = self.c.get_waveforms(
                model="test", network="IU", station="ANMO",
                eventid="GCMT:C201002270634A", starttime="P-10",
                endtime="P+10", components="Z", filename=tf)
            # No return value.
            self.assertTrue(st is None)

            st = obspy.read(filename)
            self.assertEqual(len(st), 1)

        # Save to an open file-like object.
        with io.BytesIO() as buf:
            st = self.c.get_waveforms(
                model="test", network="IU", station="ANMO",
                eventid="GCMT:C201002270634A", starttime="P-10",
                endtime="P+10", components="Z", filename=buf)
            # No return value.
            self.assertTrue(st is None)

            buf.seek(0, 0)
            st = obspy.read(buf)
            self.assertEqual(len(st), 1)
Example #5
    def test_mseed_zero_data_headonly(self):
        """
        Tests that records with no data correctly work in headonly mode.
        """
        file = os.path.join(self.path, "data",
                            "three_records_zero_data_in_middle.mseed")

        expected = [
            ("BW.BGLD..EHE", UTCDateTime("2007-12-31T23:59:59.765000Z"),
             UTCDateTime("2008-01-01T00:00:01.820000Z"), 200.0, 412),
            ("BW.BGLD..EHE", UTCDateTime("2008-01-01T00:00:01.825000Z"),
             UTCDateTime("2008-01-01T00:00:01.825000Z"), 200.0, 0),
            ("BW.BGLD..EHE", UTCDateTime("2008-01-01T00:00:03.885000Z"),
             UTCDateTime("2008-01-01T00:00:05.940000Z"), 200.0, 412)]

        # Default full read.
        st = read(file)
        self.assertEqual(len(st), 3)
        for tr, exp in zip(st, expected):
            self.assertEqual(tr.id, exp[0])
            self.assertEqual(tr.stats.starttime, exp[1])
            self.assertEqual(tr.stats.endtime, exp[2])
            self.assertEqual(tr.stats.sampling_rate, exp[3])
            self.assertEqual(tr.stats.npts, exp[4])

        # Headonly read.
        st = read(file, headonly=True)
        self.assertEqual(len(st), 3)
        for tr, exp in zip(st, expected):
            self.assertEqual(tr.id, exp[0])
            self.assertEqual(tr.stats.starttime, exp[1])
            self.assertEqual(tr.stats.endtime, exp[2])
            self.assertEqual(tr.stats.sampling_rate, exp[3])
            self.assertEqual(tr.stats.npts, exp[4])
Example #6
    def test_xcorrPickCorrection(self):
        """
        Test cross correlation pick correction on a set of two small local
        earthquakes.
        """
        st1 = read(os.path.join(self.path,
                                'BW.UH1._.EHZ.D.2010.147.a.slist.gz'))
        st2 = read(os.path.join(self.path,
                                'BW.UH1._.EHZ.D.2010.147.b.slist.gz'))

        tr1 = st1.select(component="Z")[0]
        tr2 = st2.select(component="Z")[0]
        t1 = UTCDateTime("2010-05-27T16:24:33.315000Z")
        t2 = UTCDateTime("2010-05-27T16:27:30.585000Z")

        dt, coeff = xcorrPickCorrection(t1, tr1, t2, tr2, 0.05, 0.2, 0.1)
        self.assertAlmostEqual(dt, -0.014459080288833711)
        self.assertAlmostEqual(coeff, 0.91542878457939791)
        dt, coeff = xcorrPickCorrection(t2, tr2, t1, tr1, 0.05, 0.2, 0.1)
        self.assertAlmostEqual(dt, 0.014459080288833711)
        self.assertAlmostEqual(coeff, 0.91542878457939791)
        dt, coeff = xcorrPickCorrection(
            t1, tr1, t2, tr2, 0.05, 0.2, 0.1, filter="bandpass",
            filter_options={'freqmin': 1, 'freqmax': 10})
        self.assertAlmostEqual(dt, -0.013025086360067755)
        self.assertAlmostEqual(coeff, 0.98279277273758803)
Example #7
 def test_write_stream_via_obspy(self):
     """
     Write streams, i.e. multiple files via obspy.core.Trace
     """
     testdata = np.array([111, 111, 111, 111, 111, 109, 106, 103, 103,
                          110, 121, 132, 139])
     with NamedTemporaryFile() as fh:
         testfile = fh.name
         self.file = os.path.join(self.path, '3cssan.reg.8.1.RNON.wav')
         tr = read(self.file, format='WAV')[0]
         np.testing.assert_array_equal(tr.data[:13], testdata)
         # write
         st2 = Stream([Trace(), Trace()])
         st2[0].data = tr.data.copy()       # copy the data
         st2[1].data = tr.data.copy() // 2  # be sure data are different
         st2.write(testfile, format='WAV', framerate=7000)
         # read without giving the WAV format option
         base, ext = os.path.splitext(testfile)
         testfile0 = "%s%03d%s" % (base, 0, ext)
         testfile1 = "%s%03d%s" % (base, 1, ext)
         tr30 = read(testfile0)[0]
         tr31 = read(testfile1)[0]
         self.assertEqual(tr30.stats, tr.stats)
         self.assertEqual(tr31.stats, tr.stats)
         np.testing.assert_array_equal(tr30.data[:13], testdata)
         np.testing.assert_array_equal(tr31.data[:13], testdata // 2)
         os.remove(testfile0)
         os.remove(testfile1)
Example #8
 def test_saveWaveformNoCompression(self):
     """
     Explicitly disable compression during waveform request and save it
     directly to disk.
     """
     # initialize client
     client = Client(user="******")
     start = UTCDateTime(2010, 1, 1, 0, 0)
     end = start + 1
     # MiniSEED
     with NamedTemporaryFile(suffix=".bz2") as tf:
         mseedfile = tf.name
         client.save_waveforms(mseedfile, "GE", "APE", "", "BHZ", start, end, compressed=False)
         st = read(mseedfile)
         # MiniSEED may not start with Volume Index Control Headers (V)
         with open(mseedfile, "rb") as fp:
             self.assertNotEqual(fp.read(8)[6:7], b"V")
         # ArcLink cuts on record base
         self.assertEqual(st[0].stats.network, "GE")
         self.assertEqual(st[0].stats.station, "APE")
         self.assertEqual(st[0].stats.location, "")
         self.assertEqual(st[0].stats.channel, "BHZ")
     # Full SEED
     with NamedTemporaryFile(suffix=".bz2") as tf:
         fseedfile = tf.name
         client.save_waveforms(fseedfile, "GE", "APE", "", "BHZ", start, end, format="FSEED")
         st = read(fseedfile)
         # Full SEED
         client.save_waveforms(fseedfile, "BW", "MANZ", "", "EHZ", start, end, format="FSEED")
         # ArcLink cuts on record base
         self.assertEqual(st[0].stats.network, "GE")
         self.assertEqual(st[0].stats.station, "APE")
         self.assertEqual(st[0].stats.location, "")
         self.assertEqual(st[0].stats.channel, "BHZ")
Example #9
 def test_saveWaveformCompressed(self):
     """
     Tests saving compressed and not unpacked bzip2 files to disk.
     """
     # initialize client
     client = Client(user="******")
     start = UTCDateTime(2008, 1, 1, 0, 0)
     end = start + 1
     # MiniSEED
     with NamedTemporaryFile(suffix=".bz2") as tf:
         mseedfile = tf.name
         client.save_waveforms(mseedfile, "GE", "APE", "", "BHZ", start, end, unpack=False)
         # check if compressed
         with open(mseedfile, "rb") as fp:
             self.assertEqual(fp.read(2), b"BZ")
         # importing via read should work too
         read(mseedfile)
     # Full SEED
     with NamedTemporaryFile(suffix=".bz2") as tf:
         fseedfile = tf.name
         client.save_waveforms(fseedfile, "GE", "APE", "", "BHZ", start, end, format="FSEED", unpack=False)
         # check if compressed
         with open(fseedfile, "rb") as fp:
             self.assertEqual(fp.read(2), b"BZ")
         # importing via read should work too
         read(fseedfile)
Example #10
 def test_undefined_b(self):
     """
     Test that an undefined B value (-12345.0) is not messing up the
     starttime
     """
     # read in the test file and see that sac reference time and
     # starttime of seismogram are correct
     tr = read(self.file)[0]
     self.assertEqual(tr.stats.starttime.timestamp, 269596810.0)
     self.assertEqual(tr.stats.sac.b, 10.0)
     with open(self.file, 'rb') as fh:
         sac_ref_time = SACTrace.read(fh).reftime
     self.assertEqual(sac_ref_time.timestamp, 269596800.0)
     # change b to undefined and write (same case as if b == 0.0)
     # now sac reference time and reftime of seismogram must be the
     # same
     tr.stats.sac.b = -12345.0
     with NamedTemporaryFile() as tf:
         tmpfile = tf.name
         tr.write(tmpfile, format="SAC")
         tr2 = read(tmpfile)[0]
         self.assertEqual(tr2.stats.starttime.timestamp, 269596810.0)
         self.assertEqual(tr2.stats.sac.b, 10.0)
         with open(tmpfile, "rb") as fh:
             sac_ref_time2 = SACTrace.read(fh).reftime
     self.assertEqual(sac_ref_time2.timestamp, 269596800.0)
Example #11
 def test_issue390(self):
     """
     Read all SAC headers if debug_headers flag is enabled.
     """
     # 1 - binary SAC
     tr = read(self.file, headonly=True, debug_headers=True)[0]
     self.assertEqual(tr.stats.sac.nzyear, 1978)
     self.assertEqual(tr.stats.sac.nzjday, 199)
     self.assertEqual(tr.stats.sac.nzhour, 8)
     self.assertEqual(tr.stats.sac.nzmin, 0)
     self.assertEqual(tr.stats.sac.nzsec, 0)
     self.assertEqual(tr.stats.sac.nzmsec, 0)
     self.assertEqual(tr.stats.sac.delta, 1.0)
     self.assertEqual(tr.stats.sac.scale, -12345.0)
     self.assertEqual(tr.stats.sac.npts, 100)
     self.assertEqual(tr.stats.sac.knetwk, '-12345  ')
     self.assertEqual(tr.stats.sac.kstnm, 'STA     ')
     self.assertEqual(tr.stats.sac.kcmpnm, 'Q       ')
     # 2 - ASCII SAC
     tr = read(self.filexy, headonly=True, debug_headers=True)[0]
     self.assertEqual(tr.stats.sac.nzyear, -12345)
     self.assertEqual(tr.stats.sac.nzjday, -12345)
     self.assertEqual(tr.stats.sac.nzhour, -12345)
     self.assertEqual(tr.stats.sac.nzmin, -12345)
     self.assertEqual(tr.stats.sac.nzsec, -12345)
     self.assertEqual(tr.stats.sac.nzmsec, -12345)
     self.assertEqual(tr.stats.sac.delta, 1.0)
     self.assertEqual(tr.stats.sac.scale, -12345.0)
     self.assertEqual(tr.stats.sac.npts, 100)
     self.assertEqual(tr.stats.sac.knetwk, '-12345  ')
     self.assertEqual(tr.stats.sac.kstnm, 'sta     ')
     self.assertEqual(tr.stats.sac.kcmpnm, 'Q       ')
Example #12
 def test_read_and_write_via_obspy(self):
     """
     Read and Write files via L{obspy.Stream}
     """
     # read trace
     tr = read(self.file)[0]
     # write comparison trace
     st2 = Stream()
     st2.traces.append(Trace())
     tr2 = st2[0]
     tr2.data = copy.deepcopy(tr.data)
     tr2.stats = copy.deepcopy(tr.stats)
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st2.write(tempfile, format='SAC')
         # read comparison trace
         tr3 = read(tempfile)[0]
     # check if equal
     self.assertEqual(tr3.stats['station'], tr.stats['station'])
     self.assertEqual(tr3.stats.npts, tr.stats.npts)
     self.assertEqual(tr3.stats['sampling_rate'], tr.stats['sampling_rate'])
     self.assertEqual(tr3.stats.get('channel'), tr.stats.get('channel'))
     self.assertEqual(tr3.stats.get('starttime'), tr.stats.get('starttime'))
     self.assertEqual(tr3.stats.sac.get('nvhdr'), tr.stats.sac.get('nvhdr'))
     np.testing.assert_equal(tr.data, tr3.data)
Example #13
 def test_reference_time(self):
     """
     Test case for bug #107. The SAC reference time is specified by the
     iztype. However it seems no matter what iztype is given, the
     starttime of the seismogram is calculated by adding the B header
     (in seconds) to the SAC reference time.
     """
     file = os.path.join(self.path, "data", "seism.sac")
     tr = read(file)[0]
     # see that starttime is set correctly (#107)
     self.assertAlmostEqual(tr.stats.sac.iztype, 9)
     self.assertAlmostEqual(tr.stats.sac.b, 9.4599991)
     self.assertEqual(tr.stats.starttime,
                      UTCDateTime("1981-03-29 10:38:23.459999"))
     # check that if we rewrite the file, nothing changed
     with NamedTemporaryFile() as tf:
         tmpfile = tf.name
         tr.write(tmpfile, format="SAC")
         tr2 = read(tmpfile)[0]
         self.assertEqual(tr.stats.station, tr2.stats.station)
         self.assertEqual(tr.stats.npts, tr2.stats.npts)
         self.assertEqual(tr.stats.delta, tr2.stats.delta)
         self.assertEqual(tr.stats.starttime, tr2.stats.starttime)
         self.assertEqual(tr.stats.sac.b, tr2.stats.sac.b)
         np.testing.assert_array_equal(tr.data, tr2.data)
     # check some more header entries that can be seen in the plot
     self.assertEqual(tr.stats.station, "CDV")
     self.assertEqual(tr.stats.channel, "Q")
Example #14
    def test_evalresp_file_like_object(self):
        """
        Test evalresp with file like object
        """
        rawf = os.path.join(self.path, "CRLZ.HHZ.10.NZ.SAC")
        respf = os.path.join(self.path, "RESP.NZ.CRLZ.10.HHZ")

        tr1 = read(rawf)[0]
        tr2 = read(rawf)[0]

        date = UTCDateTime(2003, 11, 1, 0, 0, 0)
        seedresp = {
            "filename": respf,
            "date": date,
            "units": "VEL",
            "network": "NZ",
            "station": "CRLZ",
            "location": "10",
            "channel": "HHZ",
        }
        tr1.data = seisSim(tr1.data, tr1.stats.sampling_rate, seedresp=seedresp)

        with open(respf, "rb") as fh:
            stringio = io.BytesIO(fh.read())
        seedresp["filename"] = stringio
        tr2.data = seisSim(tr2.data, tr2.stats.sampling_rate, seedresp=seedresp)

        self.assertEqual(tr1, tr2)
Example #15
def trim_add_noise(data_path,checker_path,search_pattern):
    '''
    Trim checkerboard data and add Gaussian noise to it.

    data_path='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/data/waveforms/'
    search_pattern='checker.*disp*'
    checker_path='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/output/forward_models/'
    '''
    from numpy import var
    from numpy.random import normal
    from glob import glob
    from obspy import read
    
    checker_files=glob(checker_path+search_pattern)
    for k in range(len(checker_files)):
        ch=read(checker_files[k])
        #Find corresponding data file
        sta=checker_files[k].split('/')[-1].split('.')[1]
        vord=checker_files[k].split('/')[-1].split('.')[2]
        comp=checker_files[k].split('/')[-1].split('.')[3]
        data_file=glob(data_path+sta+'*'+vord+'*'+comp)
        st=read(data_file[0])
        ch.trim(starttime=st[0].stats.starttime,endtime=st[0].stats.endtime)
        #determine variance
        v=2e-5 #vel
        noise=normal(loc=0.0, scale=v**0.5, size=ch[0].stats.npts)
        ch[0].data=ch[0].data+noise
        ch.write(checker_files[k],format='SAC')
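A hypothetical call using the directory layout quoted in the docstring (all paths and the search pattern are placeholders):

trim_add_noise(data_path='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/data/waveforms/',
               checker_path='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/output/forward_models/',
               search_pattern='checker.*disp*')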
Example #16
 def test_notUsedButGivenHeaders(self):
     """
     Test case for #188
     """
     tr1 = read(self.file)[0]
     not_used = [
         "xminimum",
         "xmaximum",
         "yminimum",
         "ymaximum",
         "unused6",
         "unused7",
         "unused8",
         "unused9",
         "unused10",
         "unused11",
         "unused12",
     ]
     for i, header_value in enumerate(not_used):
         tr1.stats.sac[header_value] = i
     with NamedTemporaryFile() as tf:
         sac_file = tf.name
         tr1.write(sac_file, "SAC")
         tr2 = read(sac_file)[0]
     for i, header_value in enumerate(not_used):
         self.assertEqual(int(tr2.stats.sac[header_value]), i)
Example #17
 def test_saveWaveformCompressed(self):
     """
     Tests saving compressed and not unpacked bzip2 files to disk.
     """
     mseedfile = NamedTemporaryFile(suffix='.bz2').name
     fseedfile = NamedTemporaryFile(suffix='.bz2').name
     try:
         # initialize client
         client = Client(user='******')
         start = UTCDateTime(2008, 1, 1, 0, 0)
         end = start + 1
         # MiniSEED
         client.saveWaveform(mseedfile, 'GE', 'APE', '', 'BHZ', start, end,
                             unpack=False)
         # check if compressed
         self.assertEquals(open(mseedfile, 'rb').read(2), 'BZ')
         # importing via read should work too
         read(mseedfile)
         # Full SEED
         client.saveWaveform(fseedfile, 'GE', 'APE', '', 'BHZ', start, end,
                             format="FSEED", unpack=False)
         # check if compressed
         self.assertEquals(open(fseedfile, 'rb').read(2), 'BZ')
         # importing via read should work too
         read(fseedfile)
     finally:
         os.remove(mseedfile)
         os.remove(fseedfile)
Example #18
 def test_allDataTypesAndEndiansInMultipleFiles(self):
     """
     Tests writing all different types. This is a test which is independent
     of the read method. Only the data part is verified.
     """
     file = os.path.join(self.path, "data",
                         "BW.BGLD.__.EHE.D.2008.001.first_record")
     # Read the data and copy them
     st = read(file)
     data_copy = st[0].data.copy()
     # Float64, Float32, Int32, Char, Int16
     encodings = {5: "f8", 4: "f4", 3: "i4", 0: "S1", 1: "i2"}
     byteorders = {0: '<', 1: '>'}
     for byteorder, btype in byteorders.items():
         for encoding, dtype in encodings.items():
             # Convert data to floats and write them again
             st[0].data = data_copy.astype(dtype)
             with NamedTemporaryFile() as tf:
                 tempfile = tf.name
                 st.write(tempfile, format="MSEED", encoding=encoding,
                          reclen=256, byteorder=byteorder)
                 # Read the first record of data without header not using
                 # ObsPy
                 s = open(tempfile, "rb").read()
                 data = np.frombuffer(s[56:256], dtype=btype + dtype)
                 np.testing.assert_array_equal(data, st[0].data[:len(data)])
                 # Read the binary chunk of data with ObsPy
                 st2 = read(tempfile)
             np.testing.assert_array_equal(st2[0].data, st[0].data)
Example #19
 def test_readAndWriteViaObsPy(self):
     """
     Read and Write files via L{obspy.Stream}
     """
     # read trace
     tr = read(self.file)[0]
     # write comparison trace
     st2 = Stream()
     st2.traces.append(Trace())
     tr2 = st2[0]
     tr2.data = copy.deepcopy(tr.data)
     tr2.stats = copy.deepcopy(tr.stats)
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st2.write(tempfile, format="SAC")
         # read comparison trace
         tr3 = read(tempfile)[0]
     # check if equal
     self.assertEqual(tr3.stats["station"], tr.stats["station"])
     self.assertEqual(tr3.stats.npts, tr.stats.npts)
     self.assertEqual(tr.stats["sampling_rate"], tr.stats["sampling_rate"])
     self.assertEqual(tr.stats.get("channel"), tr.stats.get("channel"))
     self.assertEqual(tr.stats.get("starttime"), tr.stats.get("starttime"))
     self.assertEqual(tr.stats.sac.get("nvhdr"), tr.stats.sac.get("nvhdr"))
     np.testing.assert_equal(tr.data, tr3.data)
Example #20
    def test_writingSUFileWithNoHeader(self):
        """
        If the trace has no trace.su attribute, one should still be able to
        write a SeismicUnix file.

        This is not recommended because most Trace.stats attributes will be
        lost while writing SU.
        """
        st = read()
        del st[1:]
        st[0].data = np.require(st[0].data, 'float32')
        with NamedTemporaryFile() as tf:
            outfile = tf.name
            st.write(outfile, format='SU')
            st2 = read(outfile)
            # Compare new and old stream objects. All the other header
            # attributes will not be set.
            np.testing.assert_array_equal(st[0].data, st2[0].data)
            self.assertEqual(st[0].stats.starttime, st2[0].stats.starttime)
            self.assertEqual(st[0].stats.endtime, st2[0].stats.endtime)
            self.assertEqual(st[0].stats.sampling_rate,
                             st2[0].stats.sampling_rate)
            # Writing and reading this new stream object should not change
            # anything.
            st2.write(outfile, format='SU')
            st3 = read(outfile)
        np.testing.assert_array_equal(st2[0].data, st3[0].data)
        # Remove the su attributes because they will not be equal due to lazy
        # header attributes.
        del st2[0].stats.su
        del st3[0].stats.su
        self.assertEqual(st2[0].stats, st3[0].stats)
Example #21
File: main.py Project: preinh/RF
def rf_dmt(events='events_rf.xml', rftype='Ps', dist=(30, 90),
           **rf_kwargs):
    events = io.read_rfevents(events)
    print(events)
    for event in events:
        event_id = event.resource_id.getQuakeMLURI().split('/')[-1]
        output_file = os.path.join(conf.output_path, event_id, conf.rf_data)
        input_files = glob.glob(os.path.join(conf.dmt_path, event_id,
                                             conf.dmt_data))
        while len(input_files) > 0:
            files_tmp = input_files[0][:-1] + '?'
            for f in glob.glob(files_tmp):
                input_files.remove(f)
            st = read(files_tmp, headonly=True)
            io.read_sac_header(st)
            stats = event2stats(st[0].stats.latitude, st[0].stats.longitude,
                                event, phase=rftype[0], dist_range=dist)
            if not stats:
                continue
            st = read(files_tmp)
            st.merge()
            if len(st) != 3:
                import warnings
                warnings.warn('Need 3 component seismograms. More or less '
                              'than three components for files %s' % files_tmp)
                continue
            io.read_sac_header(st)
            rf_stream(st, stats, **rf_kwargs)
            io.write_sac_header(st, event)
            for tr in st:
                io.create_dir(output_file)
                tr.write(output_file.format(stats=tr.stats), 'SAC')
Example #22
 def test_readAndWriteStreamsViaObsPy(self):
     """
     Read and Write files containing multiple GSE2 parts via L{obspy.Trace}
     """
     files = [os.path.join(self.path, 'data', 'loc_RNON20040609200559.z'),
              os.path.join(self.path, 'data', 'loc_RJOB20050831023349.z')]
     testdata = [12, -10, 16, 33, 9, 26, 16, 7, 17, 6, 1, 3, -2]
     # write test file containing multiple GSE2 parts
     with NamedTemporaryFile() as tf:
         for filename in files:
             with open(filename, 'rb') as f1:
                 tf.write(f1.read())
         tf.flush()
         st1 = read(tf.name)
     st1.verify()
     self.assertEqual(len(st1), 2)
     tr11 = st1[0]
     tr12 = st1[1]
     self.assertEqual(tr11.stats['station'], 'RNON')
     self.assertEqual(tr12.stats['station'], 'RJOB')
     self.assertEqual(tr12.data[0:13].tolist(), testdata)
     # write and read
     with NamedTemporaryFile() as tf:
         tmpfile = tf.name
         st1.write(tmpfile, format='GSE2')
         st2 = read(tmpfile)
     st2.verify()
     self.assertEqual(len(st2), 2)
     tr21 = st2[0]
     tr22 = st2[1]
     self.assertEqual(tr21.stats['station'], 'RNON')
     self.assertEqual(tr22.stats['station'], 'RJOB')
     self.assertEqual(tr22.data[0:13].tolist(), testdata)
     np.testing.assert_equal(tr21.data, tr11.data)
     np.testing.assert_equal(tr22.data, tr12.data)
Example #23
    def test_issue341(self):
        """
        Tests issue #341

        Read/write of MiniSEED files with huge sampling rates/delta values.
        """
        tempfile = NamedTemporaryFile().name
        # 1 - sampling rate
        st = read()
        tr = st[0]
        tr.stats.sampling_rate = 1000000000.0
        tr.write(tempfile, format="MSEED")
        # read again
        st = read(tempfile)
        self.assertEquals(st[0].stats.sampling_rate, 1000000000.0)
        # 2 - delta
        st = read()
        tr = st[0]
        tr.stats.delta = 10000000.0
        tr.write(tempfile, format="MSEED")
        # read again
        st = read(tempfile)
        self.assertAlmostEquals(st[0].stats.delta, 10000000.0, 0)
        # clean up
        os.remove(tempfile)
Example #24
    def test_simulate(self):
        """
        Tests if calling simulate of stream gives the same result as calling
        simulate on every trace manually.
        """
        st1 = read()
        st2 = read()
        paz_sts2 = {'poles': [-0.037004 + 0.037016j, -0.037004 - 0.037016j,
                              - 251.33 + 0j, -131.04 - 467.29j,
                              - 131.04 + 467.29j],
                    'zeros': [0j, 0j],
                    'gain': 60077000.0,
                    'sensitivity': 2516778400.0}
        paz_le3d1s = {'poles': [-4.440 + 4.440j, -4.440 - 4.440j,
                                - 1.083 + 0.0j],
                      'zeros': [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
                      'gain': 0.4,
                      'sensitivity': 1.0}
        st1.simulate(paz_remove=paz_sts2, paz_simulate=paz_le3d1s)
        for tr in st2:
            tr.simulate(paz_remove=paz_sts2, paz_simulate=paz_le3d1s)

        # There is some strange issue on Win32bit (see #2188). Thus we just
        # use assert_allclose() here instead of testing for full equality.
        if platform.system() == "Windows" and \
                platform.architecture()[0] == "32bit":  # pragma: no cover
            for tr1, tr2 in zip(st1, st2):
                self.assertEqual(tr1.stats, tr2.stats)
                np.testing.assert_allclose(tr1.data, tr2.data, rtol=1E-6,
                                           atol=1E-6 * tr1.data.ptp())
        else:
            self.assertEqual(st1, st2)
Example #25
    def test_relcal_sts2_vs_unknown(self):
        """
        Test relative calibration of unknown instrument vs STS2 in the same
        time range. Window length is set to 20 s, smoothing rate to 10.
        """
        st1 = read(os.path.join(self.path, 'ref_STS2'))
        st2 = read(os.path.join(self.path, 'ref_unknown'))
        calfile = os.path.join(self.path, 'STS2_simp.cal')

        freq, amp, phase = relcalstack(st1, st2, calfile, 20, smooth=10,
                                       save_data=False)

        # read in the reference responses
        un_resp = np.loadtxt(os.path.join(self.path, 'unknown.resp'))
        kn_resp = np.loadtxt(os.path.join(self.path, 'STS2.refResp'))

        # test if freq, amp and phase match the reference values
        np.testing.assert_array_almost_equal(freq, un_resp[:, 0],
                                             decimal=4)
        np.testing.assert_array_almost_equal(freq, kn_resp[:, 0],
                                             decimal=4)
        np.testing.assert_array_almost_equal(amp, un_resp[:, 1],
                                             decimal=4)
        np.testing.assert_array_almost_equal(phase, un_resp[:, 2],
                                             decimal=4)
Example #26
def get_example_data():
    """
    Helper function returning example data for Pyadjoint.

    The returned data is fully preprocessed and ready to be used with Pyflex.

    :returns: Tuple of observed and synthetic streams
    :rtype: tuple of :class:`obspy.core.stream.Stream` objects

    .. rubric:: Example

    >>> from pyadjoint.utils import get_example_data
    >>> observed, synthetic = get_example_data()
    >>> print(observed)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    3 Trace(s) in Stream:
    SY.DBO.S3.MXR | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
    SY.DBO.S3.MXT | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
    SY.DBO.S3.MXZ | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
    >>> print(synthetic)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    3 Trace(s) in Stream:
    SY.DBO..LXR   | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
    SY.DBO..LXT   | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
    SY.DBO..LXZ   | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
    """
    path = os.path.join(
        os.path.dirname(inspect.getfile(inspect.currentframe())),
        "example_data")
    observed = obspy.read(os.path.join(path, "observed_processed.mseed"))
    observed.sort()
    synthetic = obspy.read(os.path.join(path, "synthetic_processed.mseed"))
    synthetic.sort()

    return observed, synthetic
Example #27
    def test_writing_blockette_100(self):
        """
        Tests that blockette 100 is written correctly. It is only used if
        the sampling rate is higher than 32767 Hz or smaller than 1.0 /
        32767.0 Hz.
        """
        # Three traces, only the middle one needs it.
        tr = Trace(data=np.linspace(0, 100, 101))
        st = Stream(traces=[tr.copy(), tr.copy(), tr.copy()])

        st[1].stats.sampling_rate = 60000.0

        with io.BytesIO() as buf:
            st.write(buf, format="mseed")
            buf.seek(0, 0)
            st2 = read(buf)

        self.assertTrue(np.allclose(st[0].stats.sampling_rate, st2[0].stats.sampling_rate))
        self.assertTrue(np.allclose(st[1].stats.sampling_rate, st2[1].stats.sampling_rate))
        self.assertTrue(np.allclose(st[2].stats.sampling_rate, st2[2].stats.sampling_rate))

        st[1].stats.sampling_rate = 1.0 / 60000.0

        with io.BytesIO() as buf:
            st.write(buf, format="mseed")
            buf.seek(0, 0)
            st2 = read(buf)

        self.assertTrue(np.allclose(st[0].stats.sampling_rate, st2[0].stats.sampling_rate))
        self.assertTrue(np.allclose(st[1].stats.sampling_rate, st2[1].stats.sampling_rate))
        self.assertTrue(np.allclose(st[2].stats.sampling_rate, st2[2].stats.sampling_rate))
Example #28
 def test_save_waveform_compressed(self):
     """
     Tests saving compressed and not unpacked bzip2 files to disk.
     """
     # initialize client
     client = Client(user='******')
     start = UTCDateTime(2008, 1, 1, 0, 0)
     end = start + 1
     # MiniSEED
     with NamedTemporaryFile(suffix='.bz2') as tf:
         mseedfile = tf.name
         client.save_waveforms(mseedfile, 'GE', 'APE', '', 'BHZ', start,
                               end, unpack=False)
         # check if compressed
         with open(mseedfile, 'rb') as fp:
             self.assertEqual(fp.read(2), b'BZ')
         # importing via read should work too
         read(mseedfile)
     # Full SEED
     with NamedTemporaryFile(suffix='.bz2') as tf:
         fseedfile = tf.name
         client.save_waveforms(fseedfile, 'GE', 'APE', '', 'BHZ', start,
                               end, format="FSEED", unpack=False)
         # check if compressed
         with open(fseedfile, 'rb') as fp:
             self.assertEqual(fp.read(2), b'BZ')
         # importing via read should work too
         read(fseedfile)
Example #29
 def test_022_check_content(self):
     from obspy.core import read
     from numpy.testing import assert_allclose
     from ..api import connect, get_filters, get_station_pairs, \
         get_components_to_compute
     db = connect()
     for filter in get_filters(db):
         for components in get_components_to_compute(db):
             for (sta1, sta2) in get_station_pairs(db):
                 pair = "%s_%s_%s_%s" % (sta1.net, sta1.sta,
                                         sta2.net, sta2.sta)
                 tmp1 = os.path.join("STACKS",
                                     "%02i" % filter.ref,
                                     "001_DAYS",
                                     components,
                                     pair,
                                     "2010-09-01.MSEED")
                 tmp2 = os.path.join("STACKS",
                                     "%02i" % filter.ref,
                                     "001_DAYS",
                                     components,
                                     pair,
                                     "2010-09-01.SAC")
                 tmp1 = read(tmp1)
                 tmp2 = read(tmp2)
                 assert_allclose(tmp1[0].data, tmp2[0].data)
     db.close()
Example #30
    def test_relcal_different_overlaps(self):
        """
        Tests using different window overlap percentages.

        Regression test for bug #1821.
        """
        st1 = read(os.path.join(self.path, 'ref_STS2'))
        st2 = read(os.path.join(self.path, 'ref_unknown'))
        calfile = os.path.join(self.path, 'STS2_simp.cal')

        def median_amplitude_plateau(freq, amp):
            # resulting response is pretty much flat in this frequency range
            return np.median(amp[(freq >= 0.3) & (freq <= 3)])

        # correct results using default overlap fraction of 0.5
        freq, amp, phase = rel_calib_stack(
            st1, st2, calfile, 20, smooth=10, overlap_frac=0.5,
            save_data=False)
        amp_expected = median_amplitude_plateau(freq, amp)
        for overlap in np.linspace(0.1, 0.9, 5):
            freq2, amp2, phase2 = rel_calib_stack(
                st1, st2, calfile, 20, smooth=10, overlap_frac=overlap,
                save_data=False)
            amp_got = median_amplitude_plateau(freq2, amp2)
            percentual_difference = abs(
                (amp_expected - amp_got) / amp_expected)
            # make sure results are close for any overlap choice
            self.assertTrue(percentual_difference < 0.01)
Example #31
def write_correlations(event_list,
                       wavbase,
                       extract_len,
                       pre_pick,
                       shift_len,
                       lowcut=1.0,
                       highcut=10.0,
                       max_sep=8,
                       min_link=8,
                       cc_thresh=0.0,
                       plotvar=False,
                       debug=0):
    """
    Write a dt.cc file for hypoDD input for a given list of events.

    Takes an input list of events and computes pick refinements by correlation.
    Outputs two files, dt.cc and dt.cc2, each provides a different weight,
    dt.cc uses weights of the cross-correlation, and dt.cc2 provides weights
    as the square of the cross-correlation.

    :type event_list: list
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: str
    :param wavbase: Path to the seisan wave directory that the wavefiles in the
                    S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type cc_thresh: float
    :param cc_thresh: Threshold to include cross-correlation results.
    :type plotvar: bool
    :param plotvar: To show the pick-correction plots, defaults to False.
    :type debug: int
    :param debug: Variable debug levels from 0-5, higher=more output.

    .. warning:: This is not a fast routine!

    .. warning::
        In contrast to seisan's corr routine, but in accordance with the
        hypoDD manual, this outputs corrected differential time.

    .. note::
        Currently we have not implemented a method for taking
        unassociated event objects and wavefiles.  As such if you have events \
        with associated wavefiles you are advised to generate Sfiles for each \
        event using the sfile_util module prior to this step.

    .. note::
        There is no provision to taper waveforms within these functions, if you
        desire this functionality, you should apply the taper before calling
        this.  Note the :func:`obspy.Trace.taper` functions.
    """
    from obspy.signal.cross_correlation import xcorr_pick_correction
    warnings.filterwarnings(action="ignore",
                            message="Maximum of cross correlation " +
                            "lower than 0.8: *")
    corr_list = []
    f = open('dt.cc', 'w')
    f2 = open('dt.cc2', 'w')
    k_events = len(list(event_list))
    for i, master in enumerate(event_list):
        master_sfile = master[1]
        if debug > 1:
            print('Computing correlations for master: %s' % master_sfile)
        master_event_id = master[0]
        master_event = read_nordic(master_sfile)[0]
        master_picks = master_event.picks
        master_ori_time = master_event.origins[0].time
        master_location = (master_event.origins[0].latitude,
                           master_event.origins[0].longitude,
                           master_event.origins[0].depth / 1000.0)
        master_wavefiles = readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + master_wavefiles[0])
        if masterpath:
            masterstream = read(masterpath[0])
        if len(master_wavefiles) > 1:
            for wavefile in master_wavefiles:
                try:
                    masterstream += read(os.path.join(wavbase, wavefile))
                except IOError:
                    raise IOError("Couldn't find wavefile: %s" % wavefile)
        for j in range(i + 1, k_events):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            if debug > 2:
                print('Comparing to event: %s' % slave_sfile)
            slave_event_id = event_list[j][0]
            slave_wavefiles = readwavename(slave_sfile)
            try:
                slavestream = read(wavbase + os.sep + slave_wavefiles[0])
            except:
                raise IOError('No wavefile found: ' + slave_wavefiles[0] +
                              ' ' + slave_sfile)
            if len(slave_wavefiles) > 1:
                for wavefile in slave_wavefiles:
                    try:
                        slavestream += read(wavbase + os.sep + wavefile)
                    except IOError:
                        print('No waveform found: %s' %
                              (wavbase + os.sep + wavefile))
                        continue
            # Write out the header line
            event_text = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0   \n'
            event_text2 = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0   \n'
            slave_event = read_nordic(slave_sfile)[0]
            slave_picks = slave_event.picks
            slave_ori_time = slave_event.origins[0].time
            slave_location = (slave_event.origins[0].latitude,
                              slave_event.origins[0].longitude,
                              slave_event.origins[0].depth / 1000.0)
            if dist_calc(master_location, slave_location) > max_sep:
                if debug > 0:
                    print('Separation exceeds max_sep: %s' %
                          (dist_calc(master_location, slave_location)))
                continue
            links = 0
            phases = 0
            for pick in master_picks:
                if not hasattr(pick, 'phase_hint') or \
                                len(pick.phase_hint) == 0:
                    warnings.warn('No phase-hint for pick:')
                    print(pick)
                    continue
                if pick.phase_hint[0].upper() not in ['P', 'S']:
                    warnings.warn('Will only use P or S phase picks')
                    print(pick)
                    continue
                    # Only use P and S picks, not amplitude or 'other'
                # Find station, phase pairs
                # Added by Carolin
                slave_matches = [
                    p for p in slave_picks if hasattr(p, 'phase_hint')
                    and p.phase_hint == pick.phase_hint and
                    p.waveform_id.station_code == pick.waveform_id.station_code
                ]

                if masterstream.select(station=pick.waveform_id.station_code,
                                       channel='*' +
                                       pick.waveform_id.channel_code[-1]):
                    mastertr = masterstream.\
                        select(station=pick.waveform_id.station_code,
                               channel='*' +
                               pick.waveform_id.channel_code[-1])[0]
                elif debug > 1:
                    print('No waveform data for ' +
                          pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code)
                    print(pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code + ' ' + slave_sfile +
                          ' ' + master_sfile)
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(
                            station=slave_pick.waveform_id.station_code,
                            channel='*' +
                            slave_pick.waveform_id.channel_code[-1]):
                        slavetr = slavestream.\
                            select(station=slave_pick.waveform_id.station_code,
                                   channel='*' + slave_pick.waveform_id.
                                   channel_code[-1])[0]
                    else:
                        print('No slave data for ' +
                              slave_pick.waveform_id.station_code + '.' +
                              slave_pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code + ' ' +
                              slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc =\
                            xcorr_pick_correction(
                                pick.time, mastertr, slave_pick.time,
                                slavetr, pre_pick, extract_len - pre_pick,
                                shift_len, filter="bandpass",
                                filter_options={'freqmin': lowcut,
                                                'freqmax': highcut},
                                plot=plotvar)
                        # Get the differential travel time using the
                        # corrected time.
                        # Check that the correction is within the allowed shift
                        # This can occur in the obspy routine when the
                        # correlation function is increasing at the end of the
                        # window.
                        if abs(correction) > shift_len:
                            warnings.warn('Shift correction too large, ' +
                                          'will not use')
                            continue
                        correction = (pick.time - master_ori_time) -\
                            (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc >= cc_thresh:
                            weight = cc
                            phases += 1
                            # added by Caro
                            event_text += pick.waveform_id.station_code.\
                                ljust(5) + _cc_round(correction, 3).\
                                rjust(11) + _cc_round(weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            event_text2 += pick.waveform_id.station_code\
                                .ljust(5) + _cc_round(correction, 3).\
                                rjust(11) +\
                                _cc_round(weight * weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            if debug > 3:
                                print(event_text)
                        else:
                            print('cc too low: %s' % cc)
                        corr_list.append(cc * cc)
                    except:
                        msg = "Couldn't compute correlation correction"
                        warnings.warn(msg)
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
                f2.write(event_text2)
    if plotvar:
        plt.hist(corr_list, 150)
        plt.show()
    # f.write('\n')
    f.close()
    f2.close()
    return
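A hedged usage sketch; the event ids, S-file paths and seisan WAV directory below are placeholders and must point at existing Nordic files and waveforms:

event_list = [(1, 'REA/TEST_/01-0411-15L.S201309'),
              (2, 'REA/TEST_/02-0159-35L.S201309')]
write_correlations(event_list, wavbase='WAV/TEST_', extract_len=2.0,
                   pre_pick=0.5, shift_len=0.2, lowcut=2.0, highcut=10.0)

The two output files, dt.cc and dt.cc2, are written to the current working directory.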
Example #32
from obspy import read, read_inventory

st = read("/path/to/IU_ULN_00_LH1_2015-07-18T02.mseed")
tr = st[0]
inv = read_inventory("/path/to/IU_ULN_00_LH1.xml")
tr.attach_response(inv)

pre_filt = [0.001, 0.005, 10, 20]
tr.remove_response(pre_filt=pre_filt, output="DISP", water_level=60, plot=True)
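A short follow-up sketch, assuming the corrected displacement trace should be kept on disk (the output filename is hypothetical, and plot=True above can be dropped for non-interactive runs):

tr.write("IU_ULN_00_LH1_2015-07-18T02_disp.mseed", format="MSEED")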
Example #33


src_grd_ref      = 5
show_peak_info   = True
show_clean_hist  = False # this option is best used with a small control parameter and a high cln_iter value
min_relative_pow = -12         
enhance_vis      = True        
add_bg           = False                                                           
inter_mode       = 'none'      
area             = 7           
std_g            = 1           



st  = read('/Users/mgal/temp_SETA/3comp/2006.315*BHZ.SAC')
st0 = read('/Users/mgal/temp_SETA/3comp/2006.315*BHN.SAC')
st1 = read('/Users/mgal/temp_SETA/3comp/2006.315*BHE.SAC') 


st,st0,st1 = equalize(st,st0,st1)
nr = st0.count()
rx,ry = get_rxy_sac(nr,st0)   

# remove gain if known and response is flat in the frequency range of interest
st, st0, st1 = remove_gain(st,st0,st1,nr,gain=1)


nwin = int(st0[0].count()/nsamp)*2-1
dt = st0[0].stats.delta
freq = find/(nsamp*dt)
Example #34
kappa_df = pd.read_csv('/Users/tnye/tsuquakes/flatfiles/vs30_kappa.csv')
kappa = kappa_df.Kappa.values

# Get list of station names
stn_list = []
for file in obs_wfs_list:
    stn = file.split('/')[-1].split('.')[0]
    stn_list.append(stn)

# Read in waveforms
obs_wfs = []
setK_wfs = []
varK_wfs = []

for i in range(len(stn_list)):
    obs_wfs.append(read(obs_wfs_list[i]))
    setK_wfs.append(read(setK_wfs_list[i]))
    varK_wfs.append(read(varK_wfs_list[i]))

# Get spectra amplitudes and frequencies
obs_freqs = []
obs_spec = []
setK_freqs = []
setK_spec = []
varK_freqs = []
varK_spec = []

for i in range(len(stn_list)):

    # Observed spectra
    amp_squared, freq = mtspec(obs_wfs[i][0].data,
Example #35
import matplotlib.pyplot as plt
import matplotlib.dates as mdates

import obspy
from obspy.core import AttribDict
from obspy.imaging.cm import obspy_sequential
from obspy.signal.invsim import corn_freq_2_paz
from obspy.signal.array_analysis import array_processing

# Load data
st = obspy.read("http://examples.obspy.org/agfa.mseed")

# Set PAZ and coordinates for all 5 channels
st[0].stats.paz = AttribDict({
    'poles': [(-0.03736 - 0.03617j), (-0.03736 + 0.03617j)],
    'zeros': [0j, 0j],
    'sensitivity': 205479446.68601453,
    'gain': 1.0
})
st[0].stats.coordinates = AttribDict({
    'latitude': 48.108589,
    'elevation': 0.450000,
    'longitude': 11.582967
})

st[1].stats.paz = AttribDict({
    'poles': [(-0.03736 - 0.03617j), (-0.03736 + 0.03617j)],
    'zeros': [0j, 0j],
    'sensitivity':
Example #36
def preprocess(station,
               channel,
               component,
               files,
               parallel=False,
               remove_ir=False,
               client='IRIS'):
    if not isinstance(files, list):
        files = [files]

    start_timer = dt.datetime.now()

    header = obs.Stream()
    for file in files:
        read_stream = obs.read(file, headonly=True)
        for trace in read_stream.traces:
            trace.file = file

        header.extend(read_stream)

    print('Finished reading headers for {}:{} in {}s'.format(
        station, channel, (dt.datetime.now() - start_timer).total_seconds()))

    header = header.select(station=station, channel=channel)
    if len(header.traces) == 0:
        print('No files to process for {sta}:{cmp}!'.format(sta=station,
                                                            cmp=component))
        return

    header.sort(keys=['starttime'])
    n_traces = len(header.traces)

    start_times = np.zeros(n_traces)
    end_times = np.zeros(n_traces)
    for i in range(n_traces):
        trace = header.traces[int(i)]
        start_times[i] = trace.stats.starttime.timestamp
        end_times[i] = trace.stats.endtime.timestamp

    start_date = obs.UTCDateTime(np.min(start_times)).date
    end_date = obs.UTCDateTime(np.max(end_times)).date

    print('Preprocessing {}:{} from {} to {}...'.format(
        station, channel, start_date, end_date))

    day_data = DayData(
        obs.UTCDateTime(np.min(start_times)).date, station, component)
    _write_dirs(day_data, raw=True)

    start_timer = dt.datetime.now()
    dates = []
    date = start_date
    while date <= end_date:
        dates.append(date)
        date += dt.timedelta(days=1)

    client = Client(client)
    if parallel:
        #n_proc = np.min([len(dates), mp.cpu_count() / 2]) # takes a lot of memory
        pool = mp.Pool(processes=int(cfg.n_proc))
        pool.map_async(
            partial(_preprocess_wrap,
                    header=header,
                    station=station,
                    channel=channel,
                    component=component,
                    remove_ir=remove_ir,
                    client=client), dates)

        pool.close()
        pool.join()

    else:
        for date in dates:
            _preprocess(date, header, station, channel, component,
                        remove_ir, client)

    #print('Finished reading headers for {}:{} in {}s'.format(station, channel, (dt.datetime.now() - start_timer).total_seconds()))
    print('Done in {}s!'.format(
        (dt.datetime.now() - start_timer).total_seconds()))
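A hypothetical invocation, assuming daily MiniSEED files on disk; the station, channel, component and glob pattern are placeholders:

import glob
preprocess('BFZ', 'HHZ', 'Z', files=glob.glob('raw_data/*.mseed'),
           parallel=False, remove_ir=False, client='IRIS')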
Example #37
def _preprocess(date, header, station, channel, component, remove_ir, client):
    start_timer = dt.datetime.now()
    start_comp = np.array(
        [trace.stats['starttime'] < obs.UTCDateTime(date + dt.timedelta(days=1))
         for trace in header.traces], dtype=bool)
    end_comp = np.array(
        [trace.stats['endtime'] > obs.UTCDateTime(date)
         for trace in header.traces], dtype=bool)

    header_day = header.copy()
    header_day.traces = [
        trace for trace, test in zip(header.traces,
                                     np.logical_and(start_comp, end_comp))
        if test
    ]

    raw_data = obs.Stream()
    load_files = []
    for trace in header_day.traces:
        if any([file == trace.file for file in load_files]):
            continue
        else:
            raw_data.extend(obs.read(trace.file))
            load_files.append(trace.file)

    day_data = DayData(date, station, component)
    start_trace = obs.UTCDateTime(date)
    end_trace = start_trace + dt.timedelta(days=1) - 1. / cfg.sampling_rate

    load_time = (dt.datetime.now() - start_timer).total_seconds()

    # next bit is just to reseparate traces whose gaps were filled with dirty zeros
    nonzero_data = obs.Stream()
    for trace in raw_data.traces:
        if len(nonzero_data) > cfg.max_traces:
            break

        if np.sum(trace.data) == 0.:
            continue

        bounded_iszero = np.hstack([[0], trace.data == 0, [0]])
        iszero_diff = np.diff(bounded_iszero)
        zeros_start, = np.where(iszero_diff > 0)
        zeros_stop, = np.where(iszero_diff < 0)
        seq_len = zeros_stop - zeros_start
        which_seq, = np.where(
            seq_len >
            (1. /
             trace.stats.delta))  # remove zero sequences longer than 1 second

        if which_seq.size > 0:
            mask = np.zeros(trace.data.shape)
            for j in range(which_seq.size):
                start_mask = int(zeros_start[which_seq[j]])
                stop_mask = int(zeros_stop[which_seq[j]])
                mask[start_mask:stop_mask] = 1

            trace.data = np.ma.masked_where(mask, trace.data)
            nonzero_data.extend(trace.split())
        else:
            nonzero_data.extend([trace])

    nonzero_time = (dt.datetime.now() -
                    start_timer).total_seconds() - load_time

    if len(nonzero_data) == 0:
        return

    if len(nonzero_data) > cfg.max_traces:
        print(
            'More than {} gaps, skipping {} after {:.2f}s (load:{:.2f}s clean:{:.2f}s)!'
            .format(cfg.max_traces, date,
                    (dt.datetime.now() - start_timer).total_seconds(),
                    load_time, nonzero_time))
        return

    deltas = np.zeros((len(nonzero_data.traces)))
    for i in range(deltas.size):
        deltas[i] = nonzero_data.traces[i].stats.delta

    delta = stats.mode(deltas).mode
    merged = obs.Trace(data=np.zeros(86400 *
                                     int(1. / delta), dtype=np.float32))
    merged.stats.sampling_rate = cfg.sampling_rate
    merged.stats.delta = delta
    merged.stats.starttime = start_trace
    merged.stats.station = station
    merged.stats.channel = channel
    merged.stats.network = header.traces[0].stats['network']
    merged.stats.location = header.traces[0].stats['location']
    merged.mask = np.zeros(merged.stats.npts, dtype=np.int32)

    for trace in nonzero_data.traces:
        to_merge = synchronize(trace, start_trace)
        to_merge = to_merge.slice(starttime=start_trace, endtime=end_trace)

        to_merge.mask = np.ones(to_merge.stats.npts, dtype=np.int32)
        if to_merge.stats.delta == delta:
            merged = mergeStrict(merged, to_merge)

    merge_time = (dt.datetime.now() -
                  start_timer).total_seconds() - load_time - nonzero_time

    percent_gap = float(np.sum(merged.mask)) / merged.mask.size
    percent_gap = (1 - percent_gap)

    if percent_gap < cfg.gap_limit:
        merged = fill_gaps(merged)

        to_write = obs.Stream()
        to_write.extend([merged])

        if remove_ir:
            inventory = client.get_stations(
                starttime=obs.UTCDateTime(date),
                endtime=obs.UTCDateTime(date + dt.timedelta(days=1)),
                network=header_day.traces[0].stats['network'],
                sta=station,
                channel=header_day.traces[0].stats['channel'],
                level="response")
            to_write.attach_response(inventory)
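            # Deconvolve the instrument response; the taper fraction corresponds to one
            # period of the lowest pre-filter corner (1/cfg.pre_min2 seconds) expressed
            # as a fraction of the day-long trace.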
            to_write.remove_response(
                zero_mean=True,
                pre_filt=[
                    cfg.pre_min1, cfg.pre_min2, cfg.pre_max1, cfg.pre_max2
                ],
                taper=True,
                taper_fraction=(1. / cfg.pre_min2) / 86400)

        sampling_rate = 1. / delta
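        # decimate() only supports integer factors, so resample only when the current
        # rate is an integer multiple of the target rate; otherwise skip the day.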
        if sampling_rate != cfg.sampling_rate:
            if np.fmod(sampling_rate, cfg.sampling_rate) == 0:
                to_write.decimate(int(sampling_rate / cfg.sampling_rate))
            else:
                print(
                    'Cannot perform simple decimation for {} {}:{} (now: {}Hz; target: {}Hz), skipping file!'
                    .format(date, station, component,
                            sampling_rate, cfg.sampling_rate))
                return

        preprocess_time = (dt.datetime.now() - start_timer).total_seconds(
        ) - load_time - nonzero_time - merge_time

        day_data.components = [component]
        day_data.traces = to_write
        day_data.operational = {station: True}
        day_data.loaded = True
        day_data.write(raw=True)

        print(
            'Finished preprocessing for {} ({} traces) in {:.2f}s (load:{:.2f}s clean:{:.2f}s merge:{:.2f}s preprocess:{:.2f}s)'
            .format(date, len(nonzero_data),
                    (dt.datetime.now() - start_timer).total_seconds(),
                    load_time, nonzero_time, merge_time, preprocess_time))

    else:
        print(
            'Too much of a gap: {}%, skipping {} after {:.2f}s (load:{:.2f}s clean:{:.2f}s merge:{:.2f}s)'
            .format(percent_gap, date,
                    (dt.datetime.now() - start_timer).total_seconds(),
                    load_time, nonzero_time, merge_time))
Ejemplo n.º 38
0
def st_obs():
    """
    Raw observed waveforms from station NZ.BFZ.HH? for New Zealand event
    2018p130600 (GeoNet event id)
    """
    return read("./test_data/test_obs_data_NZ_BFZ_2018p130600.ascii")
Ejemplo n.º 39
0
 def align(self):
     if os.path.exists('traces.mseed'):
         self._traces = read('traces.mseed')
Ejemplo n.º 40
0
def st_syn():
    """
    Synthetic data stream generated using Specfem3D and focal mechanism for
    2018p130600. Minimum resolved period roughly 10s.
    """
    return read("./test_data/test_syn_data_NZ_BFZ_2018p130600.ascii")
Ejemplo n.º 41
0
from obspy import read
from glob import glob

files = glob(u'/Users/dmelgar/Puebla2017/strong_motion/raw/*e170919.s13')
path_out = u'/Users/dmelgar/Puebla2017/strong_motion/sac_noproc/'

for k in range(len(files)):
    st = read(files[k])

    print(st)

    sta = st[0].stats.station

    # Write each of the three components to its own SAC file
    for tr in st[:3]:
        chan = tr.stats.channel
        tr.write(path_out + sta + '.' + chan + '.sac', format='SAC')
Ejemplo n.º 42
0
def load_waveform(client, waveform):
    """
    Download the given waveform data and generate an image. This is a standalone function so we can
    run it in a separate thread. This modifies the waveform entry and returns a dummy value, or
    raises an exception on any error.
    """
    plot_width = 600
    plot_height = 120

    waveform_id = waveform.waveform_id
    LOGGER.debug("Loading waveform: %s (%s)", waveform_id, QtCore.QThread.currentThreadId())
#     LOGGER.debug("Adding slowness")
#     QtCore.QThread.currentThread().sleep(2)

    try:
        waveform.prepare()

        if waveform.image_exists:
            # No download needed
            LOGGER.info("Waveform %s already has an image" % waveform_id)
            return True

        mseedFile = waveform.mseed_path
        LOGGER.debug("%s save as MiniSEED", waveform_id)

        # Load data from disk or network as appropriate
        if os.path.exists(mseedFile):
            LOGGER.info("Loading waveform data for %s from %s", waveform_id, mseedFile)
            st = obspy.read(mseedFile)
        else:
            (network, station, location, channel) = waveform.sncl.split('.')
            service_url = get_service_url(client, 'dataselect', {
                "network": network, "station": station, "location": location, "channel": channel,
                "starttime": waveform.start_time, "endtime": waveform.end_time,
            })
            LOGGER.info("Retrieving waveform data for %s from %s", waveform_id, service_url)
            st = client.get_waveforms(
                network, station, location, channel, waveform.start_time, waveform.end_time)
            # Write to file
            st.write(mseedFile, format="MSEED")

        # Generate image if necessary
        imageFile = waveform.image_path
        if not os.path.exists(imageFile):
            LOGGER.debug('Plotting waveform image to %s', imageFile)
            # In order to really customize the plotting, we need to return the figure and modify it
            h = st.plot(size=(plot_width, plot_height), handle=True)
            # Resize the subplot to a hard size, because otherwise it will do it inconsistently
            h.subplots_adjust(bottom=.2, left=.1, right=.95, top=.95)
            # Remove the title
            for c in h.get_children():
                if isinstance(c, matplotlib.text.Text):
                    c.remove()
            # Save with transparency
            h.savefig(imageFile)
            matplotlib.pyplot.close(h)

        waveform.check_files()

    except Exception as e:
        # Most common error is "no data" TODO: see https://github.com/obspy/obspy/issues/1656
        if str(e).startswith("No data"):
            waveform.error = NO_DATA_ERROR
            # Deselect when no data available
            waveform.keep = False
        else:
            waveform.error = str(e)
        # Reraise the exception to signal an error to the caller
        raise

    finally:
        # Mark as finished loading
        waveform.loading = False

    return True
Ejemplo n.º 43
0
    def test_ppsd_w_iris(self):
        # Bands to be used this is the upper and lower frequency band pairs
        fres = zip([0.1, 0.05], [0.2, 0.1])

        file_data_anmo = os.path.join(self.path, 'IUANMO.seed')
        # Read in ANMO data for one day
        st = read(file_data_anmo)

        # Use a canned ANMO response which will stay static
        paz = {
            'gain':
            86298.5,
            'zeros': [0, 0],
            'poles': [
                -59.4313, -22.7121 + 27.1065j, -22.7121 - 27.1065j, -0.0048004,
                -0.073199
            ],
            'sensitivity':
            3.3554 * 10**9
        }

        # Make an empty PPSD and add the data
        # use highest frequency given by IRIS Mustang noise-pdf web service
        # (0.475683 Hz == 2.10224036 s) as center of first bin, so that we
        # end up with the same bins.
        ppsd = PPSD(st[0].stats, paz, period_limits=(2.10224036, 1400))
        ppsd.add(st)
        ppsd.calculate_histogram()

        # Get the 50th percentile from the PPSD
        (per, perval) = ppsd.get_percentile(percentile=50)
        perinv = 1 / per

        # Read in the results obtained from a Mustang flat file
        file_data_iris = os.path.join(self.path, 'IRISpdfExample')
        data = np.genfromtxt(file_data_iris,
                             comments='#',
                             delimiter=',',
                             dtype=[(native_str("freq"), np.float64),
                                    (native_str("power"), np.int32),
                                    (native_str("hits"), np.int32)])
        freq = data["freq"]
        power = data["power"]
        hits = data["hits"]
        # cut data to same period range as in the ppsd we computed
        # (Mustang returns more long periods, probably due to some zero padding
        # or longer nfft in psd)
        num_periods = len(ppsd.period_bin_centers)
        freqdistinct = np.array(sorted(set(freq), reverse=True)[:num_periods])
        # just make sure that we compare the same periods in the following
        # (as we access both frequency arrays by indices from now on)
        np.testing.assert_allclose(freqdistinct,
                                   1 / ppsd.period_bin_centers,
                                   rtol=1e-4)

        # For each frequency pair we want to compare the mean of the bands
        for fre in fres:
            # determine which bins we want to compare
            mask = (fre[0] < perinv) & (perinv < fre[1])

            # Get the values for the bands from the PPSD
            per_val_good_obspy = perval[mask]

            percenlist = []
            # Now we sort out all of the data from the IRIS flat file
            # We will loop through the frequency values and compute a
            # 50th percentile
            for curfreq in freqdistinct[mask]:
                mask_ = curfreq == freq
                tempvalslist = np.repeat(power[mask_], hits[mask_])
                percenlist.append(np.percentile(tempvalslist, 50))
            # Here is the actual test
            np.testing.assert_allclose(np.mean(per_val_good_obspy),
                                       np.mean(percenlist),
                                       rtol=0.0,
                                       atol=1.2)
Ejemplo n.º 44
0
    def read_mseed_array(self, fname, stations, amplitude=False, remove_resp=True):

        mseed = obspy.read(fname)
        if self.highpass_filter == 0:
            try:
                mseed = mseed.detrend("spline", order=2, dspline=5 * mseed[0].stats.sampling_rate)
            except:
                logging.error(f"Error: spline detrend failed at file {fname}")
                mseed = mseed.detrend("demean")
        else:
            mseed = mseed.filter("highpass", freq=self.highpass_filter)
        mseed = mseed.merge(fill_value=0)
        starttime = min([st.stats.starttime for st in mseed])
        endtime = max([st.stats.endtime for st in mseed])
        mseed = mseed.trim(starttime, endtime, pad=True, fill_value=0)

        for i in range(len(mseed)):
            if mseed[i].stats.sampling_rate != self.config.sampling_rate:
                logging.warning(
                    f"Resampling {mseed[i].id} from {mseed[i].stats.sampling_rate} to {self.config.sampling_rate} Hz"
                )
                mseed[i] = mseed[i].interpolate(self.config.sampling_rate, method="linear")

        order = ['3', '2', '1', 'E', 'N', 'Z']
        order = {key: i for i, key in enumerate(order)}
        comp2idx = {"3": 0, "2": 1, "1": 2, "E": 0, "N": 1, "Z": 2}
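        # Sorting component codes with `order` makes 3/2/1 (or E/N/Z) map consistently to
        # the E, N, Z slots; `comp2idx` supplies the fixed channel index when a station
        # has fewer than three components.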

        nsta = len(stations)
        nt = len(mseed[0].data)
        data = []
        station_id = []
        t0 = []
        raw_amp = []
        for i in range(nsta):
            trace_data = np.zeros([nt, self.config.n_channel], dtype=self.dtype)
            if amplitude:
                trace_amp = np.zeros([nt, self.config.n_channel], dtype=self.dtype)
            empty_station = True
            sta = stations.iloc[i]["station"]
            comp = stations.iloc[i]["component"].split(",")
            if amplitude:
                resp = stations.iloc[i]["response"].split(",")

            for j, c in enumerate(sorted(comp, key=lambda x: order[x[-1]])):

                if amplitude:
                    resp_j = resp[j]
                if len(comp) != 3:  # fewer than 3 components
                    j = comp2idx[c]

                if len(mseed.select(id=sta + c)) == 0:
                    print(f"Empty trace: {sta+c} {starttime}")
                    continue
                else:
                    empty_station = False

                tmp = mseed.select(id=sta + c)[0].data.astype(self.dtype)
                trace_data[: len(tmp), j] = tmp[:nt]
                if amplitude:
                    if stations.iloc[i]["unit"] == "m/s**2":
                        tmp = mseed.select(id=sta + c)[0]
                        tmp = tmp.integrate()
                        tmp = tmp.filter("highpass", freq=1.0)
                        tmp = tmp.data.astype(self.dtype)
                        trace_amp[: len(tmp), j] = tmp[:nt]
                    elif stations.iloc[i]["unit"] == "m/s":
                        tmp = mseed.select(id=sta + c)[0].data.astype(self.dtype)
                        trace_amp[: len(tmp), j] = tmp[:nt]
                    else:
                        print(
                            f"Error in {stations.iloc[i]['station']}\n{stations.iloc[i]['unit']} should be m/s**2 or m/s!"
                        )
                if amplitude and remove_resp:
                    #trace_amp[:, j] /= float(resp[j])
                    trace_amp[:, j] /= float(resp_j)
                    
            if not empty_station:
                data.append(trace_data)
                if amplitude:
                    raw_amp.append(trace_amp)
                station_id.append(sta)
                t0.append(starttime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3])

        data = np.stack(data)
        if len(data.shape) == 3:
            data = data[:, :, np.newaxis, :]
        if amplitude:
            raw_amp = np.stack(raw_amp)
            if len(raw_amp.shape) == 3:
                raw_amp = raw_amp[:, :, np.newaxis, :]

        if amplitude:
            meta = {"data": data, "t0": t0, "station_id": station_id, "fname": station_id,  "raw_amp": raw_amp}
        else:
            meta = {"data": data, "t0": t0, "station_id": station_id, "fname": station_id}
        return meta
Ejemplo n.º 45
0
def _prepare_continuous_data(datadir, outdir, inv, chalst=['LHZ', 'LHE', 'LHN'], remove_response=False, verbose=True):
    wavepfx = datadir+'/'
    for network in inv.networks:
        net_code    = network.code
        for station in network.stations:
            sta_code    = station.code
            stla    = station.latitude; stlo    = station.longitude
            for cha in chalst:
                # # # if not remove_response:
                # # #     wfpattern   = datadir+'/raw/'+net_code+'.'+sta_code+'.*'+cha
                # # #     wfLst       = glob.glob(wfpattern)
                # # #     if len(wfLst) == 0: continue
                # # #     else:
                # # #         wfname  = wfLst[0]
                # # # else:
                # # #     wfpattern   = datadir+'/processed/'+net_code+'.'+sta_code+'.*'+cha
                # # #     wfLst       = glob.glob(wfpattern)
                # # #     if len(wfLst) == 0: continue
                # # #     else:
                # # #         wfname  = wfLst[0]
                wfpattern   = datadir+'/raw/'+net_code+'.'+sta_code+'.*'+cha
                wfLst       = glob.glob(wfpattern)
                if len(wfLst) == 0: continue
                else:
                    wfname  = wfLst[0]        
                tr          = obspy.read(wfname)[0]
                if tr.stats.npts*tr.stats.delta < 3600.*10.:
                    print('Too many holes, skipping data: ' + wfname)
                    continue
                temp_time   = tr.stats.starttime+1000.
                stime       = obspy.UTCDateTime(temp_time.date)+1.
                etime       = stime+3600.*24.-1.
                npts        = int((3600.*24.-1.)/tr.stats.delta)
                if verbose: print(wfname)
                tr.interpolate(1./tr.stats.delta, starttime=stime, npts=npts)
                # # # tr.trim(starttime=stime, endtime=etime)
                tr.stats.sac={}
                tr.stats.sac['stlo'] = stlo; tr.stats.sac['stla'] = stla
                year    = stime.year; month = mondict[stime.month]; day = stime.day
                doutdir = outdir+'/'+str(year)+'.'+month+'/'+str(year)+'.'+month+'.'+str(day)
                if not os.path.isdir(doutdir):
                    os.makedirs(doutdir)
                tempstr         = wfname.split('/')[-1]
                loc_code        = tempstr.split('.')[-2]
                if not remove_response:
                    outsacfname     = doutdir+'/'+str(year)+'.'+month+'.'+str(day)+'.'+sta_code+'.'+cha+'.SAC'
                    outrespfname    = doutdir+'/RESP.'+tempstr
                    client          = obspy.clients.iris.Client()
                    try:
                        client.resp(network=net_code, station=sta_code, channel=cha, location=loc_code, starttime=stime, endtime=etime, filename=outrespfname)
                        tr.write(outsacfname, format='SAC')
                    except:
                        continue
                else:
                    respfname       = datadir+'/resp/STXML.'+tempstr
                    if not os.path.isfile(respfname):
                        print('WARNING: No resp file for: '+str(year)+'.'+month+'.'+str(day)+'.'+cha+'.SAC')
                        continue
                    inv         = obspy.read_inventory(respfname)
                    tr.attach_response(inv)
                    tr.detrend()
                    tr.remove_response(pre_filt=(0.001, 0.005, 1, 100.0))
                    outsacfname = doutdir+'/ft_'+str(year)+'.'+month+'.'+str(day)+'.'+sta_code+'.'+cha+'.SAC'
                    tr.write(outsacfname, format='SAC')
                    with open(outsacfname+'_rec', 'w') as f:
                        f.write('0\t84001\n')
Ejemplo n.º 46
0
    def test_ppsd_w_iris_against_obspy_results(self):
        """
        Test against results obtained after merging of #1108.
        """
        # Read in ANMO data for one day
        st = read(os.path.join(self.path, 'IUANMO.seed'))

        # Read in metadata in various different formats
        paz = {
            'gain':
            86298.5,
            'zeros': [0, 0],
            'poles': [
                -59.4313, -22.7121 + 27.1065j, -22.7121 - 27.1065j, -0.0048004,
                -0.073199
            ],
            'sensitivity':
            3.3554 * 10**9
        }
        resp = os.path.join(self.path, 'IUANMO.resp')
        parser = Parser(os.path.join(self.path, 'IUANMO.dataless'))
        inv = read_inventory(os.path.join(self.path, 'IUANMO.xml'))

        # load expected results, for both only PAZ and full response
        filename_paz = os.path.join(self.path, 'IUANMO_ppsd_paz.npz')
        results_paz = PPSD.load_npz(filename_paz, metadata=None)
        filename_full = os.path.join(self.path, 'IUANMO_ppsd_fullresponse.npz')
        results_full = PPSD.load_npz(filename_full, metadata=None)

        # Calculate the PPSDs and test against expected results
        # first: only PAZ
        ppsd = PPSD(st[0].stats, paz)
        ppsd.add(st)
        # commented code to generate the test data:
        # ## np.savez(filename_paz,
        # ##          **dict([(k, getattr(ppsd, k))
        # ##                  for k in PPSD.NPZ_STORE_KEYS]))
        for key in PPSD.NPZ_STORE_KEYS_ARRAY_TYPES:
            np.testing.assert_allclose(getattr(ppsd, key),
                                       getattr(results_paz, key),
                                       rtol=1e-5)
        for key in PPSD.NPZ_STORE_KEYS_LIST_TYPES:
            for got, expected in zip(getattr(ppsd, key),
                                     getattr(results_paz, key)):
                np.testing.assert_allclose(got, expected, rtol=1e-5)
        for key in PPSD.NPZ_STORE_KEYS_SIMPLE_TYPES:
            if key in ["obspy_version", "numpy_version", "matplotlib_version"]:
                continue
            self.assertEqual(getattr(ppsd, key), getattr(results_paz, key))
        # second: various methods for full response
        for metadata in [parser, inv, resp]:
            ppsd = PPSD(st[0].stats, metadata)
            ppsd.add(st)
            # commented code to generate the test data:
            # ## np.savez(filename_full,
            # ##          **dict([(k, getattr(ppsd, k))
            # ##                  for k in PPSD.NPZ_STORE_KEYS]))
            for key in PPSD.NPZ_STORE_KEYS_ARRAY_TYPES:
                np.testing.assert_allclose(getattr(ppsd, key),
                                           getattr(results_full, key),
                                           rtol=1e-5)
            for key in PPSD.NPZ_STORE_KEYS_LIST_TYPES:
                for got, expected in zip(getattr(ppsd, key),
                                         getattr(results_full, key)):
                    np.testing.assert_allclose(got, expected, rtol=1e-5)
            for key in PPSD.NPZ_STORE_KEYS_SIMPLE_TYPES:
                if key in [
                        "obspy_version", "numpy_version", "matplotlib_version"
                ]:
                    continue
                self.assertEqual(getattr(ppsd, key),
                                 getattr(results_full, key))
Ejemplo n.º 47
0
	new_node[i3,3]=node[i3,3]+translation_z
	new_node[i3,4]=node[i3,4]
	new_node[i3,5]=node[i3,5]

### conduct the rotational operations; the rotation order here is z, x, y
new_node=rotation_z(new_node,rotation_base_station_x,rotation_base_station_y,rotation_base_station_z,z_rotation)
new_node=rotation_x(new_node,rotation_base_station_x,rotation_base_station_y,rotation_base_station_z,x_rotation)
new_node=rotation_y(new_node,rotation_base_station_x,rotation_base_station_y,rotation_base_station_z,y_rotation)

#######################################  Ending transfer the coordinates of DRM layer ######################################################################################## 

####### Sampled time information #############

file_path = sw4_motion_path + "/E_000_000_000.x"   ### opening any one of the files gives this information

sac_info = read(file_path, debug_headers=True)

No_time_step = sac_info[0].data.shape[0]

original_time_step = sac_info[0].stats.delta

sampled_time_step = original_time_step*sample_time_step_interval

sampled_No_time_step = int(math.floor((No_time_step-1)/sample_time_step_interval)+1)

Time = [i*sampled_time_step for i in range(0, sampled_No_time_step)]

h5file = h5py.File(DRM_input, "r+")

h5file.create_dataset("Time", data=Time)
Ejemplo n.º 48
0
        for kday in range(len(folders)):

            #Where are the data
            efile = folders[kday] + '/' + site + '.LXE.mseed'
            nfile = folders[kday] + '/' + site + '.LXN.mseed'
            zfile = folders[kday] + '/' + site + '.LXZ.mseed'

            #Check if there is data
            if exists(efile) == True:

                #Update boolean that checks whether data was found
                data_found = True

                #First read the mseed file, we will need this for creating the PPSD in
                #case it doesn't yet exist
                e = read(efile)
                n = read(nfile)
                z = read(zfile)

                #on the first time that data is found, check if the PPSD exists,
                #if not then create it
                if ppsd_init == False:  #i.e. haven't checked for existence of ppsd

                    Eppsd_file = ppsd_dir + site + '.LXE.ppsd.npz'
                    Nppsd_file = ppsd_dir + site + '.LXN.ppsd.npz'
                    Zppsd_file = ppsd_dir + site + '.LXZ.ppsd.npz'

                    #Does the ppsd object exist?
                    if exists(Eppsd_file) == True:

                        #Load the previously saved PPSDs
Ejemplo n.º 49
0
# !head data/station_BFO.xml

print(inventory)

network = inventory[0]
print(network)

station = network[0]
print(station)

channel = station[0]
print(channel)

print(channel.response)

st = obspy.read("./data/waveform_PFO.mseed")
print(st)

inv = obspy.read_inventory("./data/station_PFO.xml", format="STATIONXML")

print(st[0].stats)

# - the instrument response can be deconvolved from the waveform data using the convenience method **`Stream.remove_response()`**
# - evalresp is used internally to calculate the instrument response

st.plot()
st.remove_response(inventory=inv)
st.plot()

# - several options can be used to specify details of the deconvolution (water level, frequency domain prefiltering), output units (velocity/displacement/acceleration), demeaning, tapering and to specify if any response stages should be omitted
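
# - a minimal sketch of tuning these options (the corner frequencies and water level below
#   are illustrative values, not taken from this dataset):
st = obspy.read("./data/waveform_PFO.mseed")
st.remove_response(inventory=inv, output="VEL", water_level=60,
                   pre_filt=(0.005, 0.01, 8.0, 10.0),
                   zero_mean=True, taper=True, taper_fraction=0.05)
st.plot()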
Ejemplo n.º 50
0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 29 12:19:56 2017

@author: horas
"""

import obspy
import numpy as np
import matplotlib.pylab as plt

st = obspy.read("synt_full.MSEED")
obs = obspy.read("WSI_full.MSEED")       # change to the name of your .MSEED file

#####################################figure 1######################################
plt.figure(1)                        
fig = plt.figure(1)
fig.suptitle('WSI', fontsize=14)    # change the title of each figure

plt.rc('xtick', labelsize=6)
plt.rc('ytick', labelsize=6)
plt.rc('legend',fontsize=8)
#------------------------------------subplot 1---------------------------
disp = plt.subplot(2,1,1)           
disp.plot(st[0].data, color='red', label='Synthetic seismogram', linewidth=0.5)
disp.set_ylabel('Displacement (m)', fontsize=7)
disp.legend()
#------------------------------------subplot 2---------------------------
vel_obs = plt.subplot(2,1,2)        
vel_obs.plot(obs[0].data, label='Observed seismogram', linewidth=0.5)
Ejemplo n.º 51
0
    strmori = Stream()
    stalist1 = []

    idx = np.arange(nsta)
    for ista in range(len(idx)):

        stlat = stapos[idx[ista], 0]  #inv.get_coordinates(stid)['latitude']
        stlon = stapos[idx[ista], 1]  #inv.get_coordinates(stid)['longitude']

        dis1 = distaz.DistAz(evlat1, evlon1, stlat, stlon)

        if dis1.delta < mindist or dis1.delta > maxdist:
            continue

        trace = '/'.join([eqdir, evname1, stalist[idx[ista]]])
        strm = obspy.read(trace)
        tr = strm[0]
        tr.resample(rsample)
        tr.stats.coordinates = AttribDict({
            'latitude': stlat,
            'longitude': stlon,
            'elevation': 0
        })

        parr = ftelep(dis1.delta, evdep1)[0]
        tr.stats.distance = dis1.delta  #inc_angle
        tr.trim(evtime1 + parr - trimb,
                evtime1 + parr + trima,
                pad=True,
                fill_value=0)
Ejemplo n.º 52
0
 seislist = glob.glob(dir + '*PICKLE')
 azis = []
 dists = []     
 
 plt.figure(figsize=(11.69,8.27))
 for i in range(plot_row*plot_column):
     if i == 0:
         ax0=plt.subplot(plot_row, plot_column, i+1)
     else:
         plt.subplot(plot_row, plot_column, i+1)
     print('i=',i)       
     # Loop through seismograms
     count = 0
     for s in [920,39,123,171]:#range(0,len(seislist),10):    
         #print(s, '/', len(seislist))          
         seis = read(seislist[s],format='PICKLE') # read seismogram
         s_name = os.path.splitext(os.path.split(seislist[s])[1])[0]
         dists.append(seis[0].stats['dist'])# List all distances
         #    azis.append(seis[0].stats['az'])      # List all azimuths
         #    print('Azimuth: ', seis[0].stats['az'],'Distance: ',seis[0].stats['dist'])
         seistoplot = seis.select(channel = component)[0]           
         # plot synthetics             
         if syn:
             seissyn = seis.select(channel = component)[0]           
         # Plot seismograms
         #print(seis[0].stats.traveltimes['Sdiff'])            
         Phase = ['S','Sdiff']
         for x in range (0,2):
             if  seis[0].stats.traveltimes[Phase[x]]!=None:
                 phase = Phase[x]
         # Filter data
Ejemplo n.º 53
0
        parsers.update(parsers_)
    pbar.finish()

    # Parse all waveform files.
    widgets = [
        'Indexing waveform files...     ',
        progressbar.Percentage(), ' ',
        progressbar.Bar()
    ]
    pbar = progressbar.ProgressBar(widgets=widgets,
                                   maxval=len(WAVEFORM_FILES)).start()
    waveform_index = {}
    # Read all waveform files.
    for _i, waveform in enumerate(WAVEFORM_FILES):
        pbar.update(_i)
        st = read(waveform)
        for trace in st:
            if not trace.id in waveform_index:
                waveform_index[trace.id] = []
            waveform_index[trace.id].append( \
                {"filename": waveform,
                 "starttime": trace.stats.starttime,
                 "endtime": trace.stats.endtime})
    pbar.finish()

    # Define it inplace to create a closure for the waveform_index dictionary
    # because I am too lazy to fix the global variable issue right now...
    def get_corresponding_stream(waveform_id, pick_time, padding=1.0):
        """
        Helper function to find a requested waveform in the previously created
        waveform_index file.
Ejemplo n.º 54
0
                     llcrnrlat=inventory[0][0].latitude - 0.008,
                     urcrnrlon=inventory[0][0].longitude + 0.011,
                     urcrnrlat=inventory[0][0].latitude + .008,
                     epsg=4326,
                     projection='merc',
                     suppress_ticks=False,
                     resolution='h',
                     ax=axins)
    fillmap(f, inventory, zoom=zoom, xpixels=xpixels, dpi=dpi)
    mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")


if __name__ == "__main__":
    try:
        inventory = read_inventory(argv[-1])
        stream = read(argv[1])
    except:
        print('cannot find local storage')
        pass
    try:
        client = Client(argv[-1])
    except:
        print('cannot connect fdsnws url')
        pass
    try:
        client = Client('http://localhost:8080/')
    except:
        print('cannot connect local fdsnws url')
        pass
    try:
        mseedid = {
Ejemplo n.º 55
0
def _add_to_adsf_file(f, files, tag, verbose=False):
    count = 0

    event_handler = _EventContainer()
    channel_information = {}
    min_starttime = None
    max_endtime = None

    for filename in files:
        if not verbose:
            print(".", end="")
            sys.stdout.flush()
        else:
            print("Attempting to add '%s'." % filename)
        try:
            tr = obspy.read(filename, format="SAC")[0]
            if verbose:
                print("Success.")
        except:
            print("\nFailed to read '%s' as a SAC file." % filename)
            continue

        # Get to coordinates if possible:
        try:
            coords = (tr.stats.sac.stlo, tr.stats.sac.stla, tr.stats.sac.stel,
                      tr.stats.sac.stdp)
            channel_information[tr.id] = coords
        except AttributeError:
            pass

        s, e = tr.stats.starttime, tr.stats.endtime
        if min_starttime is None or s < min_starttime:
            min_starttime = s
        if max_endtime is None or e > max_endtime:
            max_endtime = e

        try:
            ev = (tr.stats.sac.evlo, tr.stats.sac.evla, tr.stats.sac.evdp,
                  tr.stats.sac.o)
        except AttributeError:
            ev = None

        if ev:
            event_id = event_handler.get_resource_identifier(
                latitude=ev[1], longitude=ev[0], depth=ev[2],
                origin_time=get_sac_reftime(tr.stats.sac) + ev[3])
        else:
            event_id = None

        f.add_waveforms(tr, tag=tag, event_id=event_id)
        count += 1

    # Add all events.
    event_handler.add_events_to_asdf_file(f)

    # Write all StationXML files at once.
    write_stationxmls(
        f, channel_information, starttime=min_starttime - 3600,
        endtime=max_endtime + 3600)

    print("\n\nWritten %i SAC files to '%s'." % (count, f.filename))
Ejemplo n.º 56
0
def main(args, config):
    if args.listSources:
        print('%-15s\t%-40s' % ('Network', 'Description'))
        print('------------------------------------------')
        for key, value in SUPPORTED_NETWORKS.items():
            print('%-15s\t%-40s' % (key, value))
        sys.exit(0)

    if args.doConfig:
        doConfig()
        sys.exit(0)
    if args.eventID and config is None:
        print(
            'To specify event ID, you must have configured the ShakeHome parameter in the config file.'
        )
        print('Re-run with -config.  Returning.')
        sys.exit(1)

    #Get the output folder
    outfolder, rawfolder = getOutFolders(args, config)
    if not os.path.isdir(rawfolder):
        os.makedirs(rawfolder)

    if args.eventID and (hasattr(args, 'time') or hasattr(args, 'lat')
                         or hasattr(args, 'lon')):
        print('Supply EITHER eventID OR time,lat,lon - not both')
        sys.exit(1)

    if (args.user and not args.password) or (args.password and not args.user):
        print('You must supply both KNET username AND password')
        sys.exit(1)

    if args.eventID:
        eventfile = os.path.join(config.get('SHAKEMAP', 'shakehome'), 'data',
                                 args.eventID, 'input', 'event.xml')
        etime, lat, lon = util.parseEvent(eventfile)

    if args.Params:
        etime = args.Params.time
        lat = args.Params.lat
        lon = args.Params.lon

    #Most formats are pre-calibrated, so we'll set parse to None for those.
    #Those formats that need a parser object (like SAC data files need a dataless SEED file)
    #will fill in the parser object below.
    parser = None
    seedresp = None
    datafiles = []
    if not args.inputFolder:
        if args.source == 'orfeus':
            stationlist = orfeus.getAmps(lat, lon, etime, args.timeWindow,
                                         args.radius)
            outfile, stationlist_tag = trace2xml.amps2xml(
                stationlist, outfolder, 'orfeus')
            print('Wrote amps from %i stations to data file %s\n' %
                  (len(stationlist), outfile))
            sys.exit(0)
        if args.source == 'knet':
            if not args.user:
                user = config.get('KNET', 'user')
                password = config.get('KNET', 'password')
            else:
                user = args.user
                password = args.password
            sys.stderr.write('Fetching strong motion data from NIED...\n')
            fetcher = knet.KNETFetcher(user, password)
        elif args.source == 'geonet':
            sys.stderr.write('Fetching strong motion data from GeoNet...\n')
            fetcher = geonet.GeonetFetcher()
        elif args.source == 'turkey':
            sys.stderr.write('Fetching strong motion data from Turkey...\n')
            fetcher = turkey.TurkeyFetcher()
        elif args.source == 'iran':
            print(
                'Automated downloading of Iran strong motion data is not supported.  Use the -i option instead.'
            )
            print(
                'Obtain strong motion records from: http://www.bhrc.ac.ir/portal/Default.aspx?tabid=635'
            )
            sys.exit(1)
        elif args.source == 'sac':
            print(
                'Automated downloading of SAC strong motion data is not supported.  Use the -i option instead.'
            )
            print(
                'SAC is a data standard, not a source.  You will need to have obtained SAC data from your own source.'
            )
            sys.exit(1)
        elif args.source == 'chile':
            print(
                'Automated downloading of Chilean calibrated ASCII strong motion data is not supported.  Use the -i option instead.'
            )
            sys.exit(1)
        elif args.source == 'pickle':
            print(
                'Automated downloading of Chilean calibrated ASCII strong motion data is not supported.  Use the -i option instead.'
            )
            sys.exit(1)
        elif args.source == 'iris':
            sys.stderr.write(
                'Fetching strong motion and broadband data from IRIS...\n')
            fetcher = iris.IrisFetcher(
                verbose=args.verbose)  #will get strong motion AND broadband
        elif args.source == 'italy':
            print(
                'Automated downloading of Italian strong motion data is not supported.  Use the -i option instead.'
            )
            sys.exit(1)
        elif args.source == 'unam':
            print(
                'Automated downloading of Mexican (UNAM) strong motion data is not supported.  Use the -i option instead.'
            )
            sys.exit(1)
        else:
            print('Data source %s not supported.' % args.source)
            sys.exit(1)
        try:
            datafiles = fetcher.fetch(lat, lon, etime, args.radius,
                                      args.timeWindow, rawfolder)
        except Exception as e:
            print(
                '(Possible) error in trying to download data from %s.  \n"%s"\n'
                % (args.source, str(e)))
        sys.stderr.write('Retrieved %i files.\n' % len(datafiles))
    else:

        if not os.path.isdir(args.inputFolder):
            print('Could not find folder "%s".  Exiting.' % args.inputFolder)
            sys.exit(1)
        if args.source == 'orfeus':
            print('Offline data processing not supported for Orfeus.')
            sys.exit(1)
        if args.source == 'knet':
            datafiles1 = glob.glob(os.path.join(args.inputFolder, '*.NS'))
            datafiles2 = glob.glob(os.path.join(args.inputFolder, '*.EW'))
            datafiles3 = glob.glob(os.path.join(args.inputFolder, '*.UD'))
            datafiles = datafiles1 + datafiles2 + datafiles3
        elif args.source == 'geonet':
            datafiles = glob.glob(os.path.join(args.inputFolder, '*.V1A'))
        elif args.source == 'turkey':
            datafiles1 = glob.glob(os.path.join(args.inputFolder, '*.txt'))
            datafiles = []
            for d in datafiles1:
                dpath, dfile = os.path.split(d)
                if re.match('[0-9]{4}', dfile) is not None:
                    datafiles.append(d)
        elif args.source == 'iran':
            datafiles = glob.glob(os.path.join(args.inputFolder, '*.V1'))
        elif args.source == 'chile':
            datafiles = glob.glob(os.path.join(args.inputFolder, '*.asc'))
        elif args.source == 'pickle':
            datafiles = glob.glob(os.path.join(args.inputFolder, '*.pickle'))
        elif args.source == 'iris':
            datafiles = glob.glob(os.path.join(args.inputFolder, '*.sac'))
        elif args.source == 'italy':
            datafiles = glob.glob(os.path.join(args.inputFolder, '*DAT'))
        elif args.source == 'sac':
            datafiles = glob.glob(os.path.join(args.inputFolder, '*.sac'))
            if not len(datafiles):
                datafiles = glob.glob(
                    os.path.join(args.inputFolder, '*.pickle'))
            seedfiles = glob.glob(os.path.join(args.inputFolder, '*.seed'))
            respfiles = glob.glob(os.path.join(args.inputFolder, '*.resp'))
            if not len(seedfiles):
                if not len(respfiles):
                    print(
                        'A dataless SEED file (ending in .seed) or a RESP file (ending in .resp) must be supplied with input SAC files. Exiting.'
                    )
                    sys.exit(1)
                else:
                    seedresp = {
                        'filename': respfiles[0],  # RESP filename
                        # when using Trace/Stream.simulate() the "date" parameter can
                        # also be omitted, and the starttime of the trace is then used.
                        'date': obspy.UTCDateTime(etime),
                        # Units to return response in ('DIS', 'VEL' or ACC)
                        'units': 'ACC'
                    }
            else:
                parser = Parser(seedfiles[0])
        elif args.source == 'unam':
            tdatafiles = glob.glob(os.path.join(args.inputFolder,
                                                '*'))  #grab everything
            datafiles = []
            for dfile in tdatafiles:
                fname, fext = os.path.splitext(dfile)
                if re.match('\d', fext[1:]) is not None:
                    datafiles.append(dfile)
        else:
            print('Data source %s not supported.' % args.source)
            sys.exit(1)

    traces = []
    for dfile in datafiles:
        if args.source == 'knet':
            if dfile.endswith(
                    '1'):  #these files are KikNet downhole (deep) stations
                continue
            trace, header = knet.readknet(dfile)
            traces.append(trace)
        elif args.source == 'geonet':
            tracelist, headers = geonet.readgeonet(dfile)
            traces = traces + tracelist
        elif args.source == 'turkey':
            tracelist, headers = turkey.readturkey(dfile)
            traces = traces + tracelist
        elif args.source == 'iran':
            doRotation = True
            if args.noRotation:
                doRotation = False
            tracelist, headers = iran.readiran(dfile, doRotation=doRotation)
            traces = traces + tracelist
        elif args.source == 'iris':
            trace = iris.readiris(dfile)
            traces.append(trace)
        elif args.source == 'italy':
            trace = italy.readitaly(dfile)
            traces.append(trace)
        elif args.source == 'chile':
            trace = chile.readchile(dfile)
            traces.append(trace)
        elif args.source == 'pickle':
            stream = obspy.core.read(dfile)
            for trace in stream:
                traces.append(trace)
        elif args.source == 'unam':
            tracelist, headers = unam.readunam(dfile)
            traces = traces + tracelist
        elif args.source == 'sac':
            stream = obspy.read(dfile)
            for trace in stream:
                traces.append(trace)
        else:
            print('Source %s is not supported' % (args.source))
            sys.exit(1)
    if len(datafiles):
        sys.stderr.write('Converting %i files to peak ground motion...\n' %
                         len(datafiles))
        stationfile, plotfiles, tag = trace2xml.trace2xml(traces,
                                                          parser,
                                                          outfolder,
                                                          args.source,
                                                          doPlot=args.doPlot,
                                                          seedresp=seedresp)
        if args.debug:
            os.remove(stationfile)
            for pfile in plotfiles:
                os.remove(pfile)
            printTag(tag)

        #if the user specified an input folder, but did not specify to keep, keep anyway
        if args.nuke:
            for dfile in datafiles:
                os.remove(dfile)
        else:
            if not args.debug:
                sys.stderr.write('Wrote %i channels to data file %s\n' %
                                 (len(traces), stationfile))
    sys.exit(0)
Ejemplo n.º 57
0
def merge_and_slice2oneday_obspy(tar_file_list, out_sacfile_stage2, log_file):
    print(out_sacfile_stage2)
    st = obspy.read(tar_file_list[0], dtype='float64')
    if len(tar_file_list) > 1:
        for tar_file in tar_file_list[1:]:
            st = st + obspy.read(tar_file, dtype='float64')

    print(st)
    for t in range(len(st)):
        sr = st[t].stats.sampling_rate
        print(sr)
        if sr != 100:
            shutil.move(tar_file_list[t], '/home/data/Changning/s6data')
    st = st.select(sampling_rate=100)
    print(st)

    st_merge = st.merge(fill_value=0)
    if len(st_merge) != 1:
        raise ValueError('The merge and slice result is wrong!')
    sampling_rate = round(st_merge[0].stats.sampling_rate)

    str_day = out_sacfile_stage2.split('/')[-2]
    day_date = UTCDateTime(str_day)
    tar_ts = day_date
    tar_te = tar_ts + 86400 - 1 / sampling_rate

    st_final = st_merge.slice(tar_ts, tar_te)
    if len(st_final) == 0:
        print('There is no data for %s' % (out_sacfile_stage2.split('/')[-2:]))
        with open(log_file, 'a') as f:
            f.writelines('There is no data for %s' %
                         (out_sacfile_stage2.split('/')[-2:]))
    else:
        tr = st_final[0]

        start_time = tr.stats.starttime
        end_time = tr.stats.endtime

        before_points = round((start_time - tar_ts) * round(sampling_rate))
        stats_before = tr.stats.copy()
        stats_before.starttime = tar_ts
        stats_before.npts = before_points
        tr_before = trace.Trace(data=np.array([float(0)] * before_points),
                                header=stats_before)

        after_points = round((tar_te - end_time) * round(sampling_rate))
        stats_after = tr.stats.copy()
        stats_after.starttime = end_time + 1 / sampling_rate
        stats_after.npts = after_points
        tr_after = trace.Trace(data=np.array([float(0)] * after_points),
                               header=stats_after)

        tr_final = tr_before + tr + tr_after
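        # Fill in the SAC reference-time header fields from the padded trace so the
        # written file describes one full, gap-free day starting at tar_ts.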
        tr_final.stats.sac['delta'] = 1 / sampling_rate
        tr_final.stats.sac['b'] = 0
        tr_final.stats.sac[
            'e'] = tr_final.stats.endtime - tr_final.stats.starttime
        tr_final.stats.sac['npts'] = tr_final.stats.npts
        tr_final.stats.sac['nzyear'] = tr_final.stats.starttime.year
        tr_final.stats.sac['nzjday'] = tr_final.stats.starttime.julday
        tr_final.stats.sac['nzhour'] = tr_final.stats.starttime.hour
        tr_final.stats.sac['nzmin'] = tr_final.stats.starttime.minute
        tr_final.stats.sac['nzsec'] = tr_final.stats.starttime.second
        tr_final.stats.sac['nzmsec'] = tr_final.stats.starttime.microsecond
        tr_final.write(out_sacfile_stage2,
                       format='SAC')  # LPSPOL is wrong in the head.

    return None
Ejemplo n.º 58
0
def extract_mseed(startend_fname,
                  network,
                  data_dir="./",
                  output_dir="./",
                  extension="mseed"):
    """Extract specific time blocks from a set of miniseed files.

    Reads a large set of miniseed files, trims out specified time
    block(s), and writes the trimmed block(s) to disk. Useful for
    condensing a large dataset consisting of miniseed files written at
    the end of each hour to a single file that spans several hours.
    Stations which share an array name will appear in a common
    directory.

    Parameters
    ----------
    startend_fname : str
        Name of .csv file with start and end times. An example file is
        provided `here <https://github.com/jpvantassel/swprocess/blob/main/examples/extract/extract_startandend.csv>`_
    network : str
        Short string of characters to identify the network. Exported
        files will utilize this network code as its prefix.
    data_dir : str, optional
        The full or a relative file path to the directory containing the
        miniseed files, default is the current directory.
    output_dir : str, optional
        The full or a relative file path to the location to place the
        output miniseed files, default is the current directory.
    extension : {"mseed", "miniseed"}, optional
        Extension used for miniSEED format, default is `"mseed"`.

    Returns
    -------
    None
        Writes folder and files to disk.

    """
    # Read start and end times.
    dtype = {
        "folder name": str,
        "array name": str,
        "station number": int,
        "start year": int,
        "start month": int,
        "start day": int,
        "start hour": int,
        "start minute": int,
        "start second": int,
        "end year": int,
        "end month": int,
        "end day": int,
        "end hour": int,
        "end minute": int,
        "end second": int,
        "notes": str
    }
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            df = pd.read_csv(startend_fname, dtype=dtype)
    except:
        raise NotImplementedError("File type not recognized.")

    # Loop through across defined timeblocks.
    logger.info("Begin iteration across dataframe ...")
    total = df["folder name"].count()
    for index, series in df.iterrows():
        logger.debug(f"\tindex={index} series={series}")

        # Start and end time.
        starttime = datetime.datetime(year=series["start year"],
                                      month=series["start month"],
                                      day=series["start date"],
                                      hour=series["start hour"],
                                      tzinfo=datetime.timezone.utc)
        logging.debug(f"\t\tstarttime={starttime}")
        currenttime = starttime

        endtime = datetime.datetime(year=series["end year"],
                                    month=series["end month"],
                                    day=series["end date"],
                                    hour=series["end hour"],
                                    tzinfo=datetime.timezone.utc)
        logging.debug(f"\t\tendtime={endtime}")

        # Avoid nonsensical time blocks.
        if endtime < starttime:
            msg = f"endtime={endtime} is less than starttime={starttime}."
            raise ValueError(msg)

        # Loop across the required hours and merge traces.
        append = False
        dt = datetime.timedelta(hours=1)
        while currenttime <= endtime:

            # miniSEED file name: NW.STNSN_SENSOR_YYYYMMDD_HH0000.miniseed
            fname = f"{network}.STN{str(series['station number']).zfill(2)}_{currenttime.year}{str(currenttime.month).zfill(2)}{str(currenttime.day).zfill(2)}_{str(currenttime.hour).zfill(2)}0000.{extension}"

            # Read current file and append if necessary
            if append:
                master += obspy.read(f"{data_dir}{fname}")
            else:
                master = obspy.read(f"{data_dir}{fname}")
                append = True

            currenttime += dt

        master = master.merge(method=1)

        # Trim merged traces between specified start and end times
        trim_start = obspy.UTCDateTime(series["start year"],
                                       series["start month"],
                                       series["start date"],
                                       series["start hour"],
                                       series["start minute"],
                                       series["start second"])
        trim_end = obspy.UTCDateTime(series["end year"], series["end month"],
                                     series["end date"], series["end hour"],
                                     series["end minute"],
                                     series["end second"])
        master.trim(starttime=trim_start, endtime=trim_end)

        # Store new miniseed files in folder titled "Array Miniseed"
        folder = series["folder name"]
        if not os.path.isdir(f"{output_dir}{folder}"):
            logger.info(f"Creating folder: {output_dir}{folder}")
            os.mkdir(f"{output_dir}{folder}")

        # Unmask masked array.
        for tr in master:
            if isinstance(tr.data, np.ma.masked_array):
                tr.data = tr.data.filled()
                msg = f"{folder}/{network}.STN{str(series['station number']).zfill(2)} was a masked array."
                warnings.warn(msg)

        # Write trimmed file to disk.
        fname_out = f"{output_dir}{folder}/{network}.STN{str(series['station number']).zfill(2)}.{series['array name']}.{extension}"
        logger.info(
            f"Extracted {index+1} of {total}. Extracting data from station {str(series['station number']).zfill(2)}. Creating file: {fname_out}."
        )

        master.write(fname_out, format="mseed")
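
# A minimal usage sketch; the CSV name, directories and the network code "NW" below are
# placeholders, not values taken from the original script.
extract_mseed("extract_startandend.csv", network="NW",
              data_dir="./data/", output_dir="./extracted/")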
Ejemplo n.º 59
0
if cut_filter:
    stanames = genfromtxt(
        '/Users/dmelgar/Chiapas2017/GPS/station_info/gps5Hz.sta',
        usecols=0,
        dtype='S')
    coords = genfromtxt(
        '/Users/dmelgar/Chiapas2017/GPS/station_info/gps5Hz.sta',
        usecols=[1, 2])
    coords[:, 0] = coords[:, 0]
    for k in range(len(stanames)):
        try:
            sta = stanames[k]
            print(sta)
            print(k)
            n = read(path + 'neu/' + sta + '.LXN.sac')
            e = read(path + 'neu/' + sta + '.LXE.sac')
            u = read(path + 'neu/' + sta + '.LXZ.sac')

            #Low pass filter
            #n[0].data=lowpass(n[0].data,fcorner,1./n[0].stats.delta,10)
            #e[0].data=lowpass(e[0].data,fcorner,1./e[0].stats.delta,10)
            #u[0].data=lowpass(u[0].data,fcorner,1./u[0].stats.delta,10)
            #decimate
            n = stdecimate(n, 5, 4)
            e = stdecimate(e, 5, 4)
            u = stdecimate(u, 5, 4)

            #Get station to hypocenter delta distance
            delta = locations2degrees(coords[k, 1], coords[k, 0], epicenter[1],
                                      epicenter[0])
Ejemplo n.º 60
0
# prepare the seedresp input
pre_filt = (
    0.007, 0.01, 0.1, 0.2
)  # define a filter band to prevent amplifying noise during the deconvolution

date = UTCDateTime(
    "2018-01-1T00:00:00.000"
)  # this can be the date of your raw data or any date for which the RESP-file is valid

# Remove the instrument response for the three components
#HHE
searchfiles('HHE')
with open('filenames.txt', 'r') as f:
    for line in f:
        st = read(line[:-1])  # strip the trailing \n

        #remove mean trend
        st.detrend(type="demean")
        st.detrend(type="linear")
        st.taper(max_percentage=0.05)

        respf = ("./FRTB_resp/RESP.BL.FRTB..HHE")  # path and name of the RESP file

        seedresp = {
            'filename': respf,  # RESP filename
            'date': date,
            'units': 'DIS'  # Units to return response in ('DIS', 'VEL' or ACC)
        }

        # Remove instrument response using the information from the given RESP file