Example #1
 def test_issue165(self):
     """
     Test cases related to #165:
      - number of poles or zeros can be 0
      - unsupported response information somewhere in the metadata should
        not automatically raise an error if the desired information can
        still be retrieved
     """
     parser = Parser(strict=True)
     file = os.path.join(self.path, "bug165.dataless")
     t = UTCDateTime("2010-01-01T00:00:00")
     parser.read(file)
     paz = parser.get_paz("NZ.DCZ.20.HNZ", t)
     result = {'digitizer_gain': 419430.0, 'gain': 24595700000000.0,
               'poles': [(-981 + 1009j), (-981 - 1009j),
                         (-3290 + 1263j), (-3290 - 1263j)],
               'seismometer_gain': 1.01885, 'sensitivity': 427336.0,
               'zeros': []}
     self.assertEqual(paz, result)
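A minimal standalone sketch of the same lookup outside the test harness, assuming the current obspy.io.xseed import path; the file name is the fixture from the example and stands in for any dataless SEED volume:

from obspy import UTCDateTime
from obspy.io.xseed import Parser

parser = Parser()
parser.read("bug165.dataless")
# get_paz() returns a plain dict with 'poles', 'zeros', 'gain',
# 'seismometer_gain', 'digitizer_gain' and 'sensitivity' keys; an empty
# 'zeros' list is valid, as the test above asserts.
paz = parser.get_paz("NZ.DCZ.20.HNZ", UTCDateTime("2010-01-01T00:00:00"))
print(paz["sensitivity"], len(paz["poles"]), len(paz["zeros"]))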
Example #2
 def test_issue_157(self):
     """
     Test case for issue #157: re-using parser object.
     """
     expected = {
         'latitude': 48.162899,
         'elevation': 565.0,
         'longitude': 11.2752,
         'local_depth': 0.0,
         'azimuth': 0.0,
         'dip': -90.0
     }
     filename1 = os.path.join(self.path, 'dataless.seed.BW_FURT')
     filename2 = os.path.join(self.path, 'dataless.seed.BW_MANZ')
     t = UTCDateTime("2010-07-01")
     parser = Parser()
     parser.read(filename2)
     # parsing a second time will raise a UserWarning: Clearing parser
     # before every subsequent read()
     with warnings.catch_warnings(record=True):
         warnings.simplefilter("error", UserWarning)
         self.assertRaises(UserWarning, parser.read, filename1)
         warnings.simplefilter("ignore", UserWarning)
         parser.read(filename1)
         result = parser.get_coordinates("BW.FURT..EHZ", t)
         self.assertEqual(expected, result)
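The warning handling above is deliberately test-specific. A hedged sketch of the plain re-use pattern, again assuming the obspy.io.xseed import path: either create a fresh Parser per file, or silence the "clearing parser" UserWarning on the second read().

import warnings

from obspy import UTCDateTime
from obspy.io.xseed import Parser

parser = Parser()
parser.read("dataless.seed.BW_MANZ")
with warnings.catch_warnings():
    # a second read() clears the parser and emits a UserWarning (see above)
    warnings.simplefilter("ignore", UserWarning)
    parser.read("dataless.seed.BW_FURT")
coords = parser.get_coordinates("BW.FURT..EHZ", UTCDateTime("2010-07-01"))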
Example #3
 def test_issue_358(self):
     """
     Test case for issue #358.
     """
     filename = os.path.join(self.path, 'CL.AIO.dataless')
     parser = Parser()
     parser.read(filename)
     dt = UTCDateTime('2012-01-01')
     parser.get_paz('CL.AIO.00.EHZ', dt)
Example #4
 def test_underline_in_site_name(self):
     """
     Test case for issue #1893.
     """
     filename = os.path.join(self.path, 'UP_BACU_HH.dataless')
     parser = Parser()
     parser.read(filename)
     # value given by pdccgg
     self.assertEqual(parser.blockettes[50][0].site_name,
                      'T3930_b A6689 3930')
Example #5
 def test_split_stations_dataless_to_xseed(self):
     """
     Test case for writing dataless to XSEED with multiple entries.
     """
     filename = os.path.join(self.path, 'dataless.seed.BW_DHFO')
     parser = Parser()
     parser.read(filename)
     with NamedTemporaryFile() as fh:
         tempfile = fh.name
         # this will create two files due to two entries in dataless
         parser.write_xseed(tempfile, split_stations=True)
         # the second file name is appended with the timestamp of start
         # period
         os.remove(tempfile + '.1301529600.0.xml')
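As the comments note, write_xseed(..., split_stations=True) emits one XSEED file per station entry, and every file after the first gets the start timestamp of its epoch appended to the base name. A short sketch with placeholder paths:

from obspy.io.xseed import Parser

parser = Parser()
parser.read("dataless.seed.BW_DHFO")
# writes dhfo.xml for the first entry plus dhfo.xml.<start-timestamp>.xml for
# the second one (the test above removes '<tempfile>.1301529600.0.xml')
parser.write_xseed("dhfo.xml", split_stations=True)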
Example #6
 def test_issue_361(self):
     """
     Test case for issue #361.
     """
     filename = os.path.join(self.path, 'G.SPB.dataless')
     parser = Parser()
     parser.read(filename)
     # 1 - G.SPB..BHZ - no Laplace transform - works
     parser.get_paz('G.SPB..BHZ')
     # 2 - G.SPB.00.BHZ - raises exception because of multiple results
     self.assertRaises(SEEDParserException, parser.get_paz, 'G.SPB.00.BHZ')
     # 3 - G.SPB.00.BHZ with datetime - no Laplace transform - works
     dt = UTCDateTime('2007-01-01')
     parser.get_paz('G.SPB.00.BHZ', dt)
     # 4 - G.SPB.00.BHZ with later datetime works
     dt = UTCDateTime('2012-01-01')
     parser.get_paz('G.SPB.00.BHZ', dt)
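The pattern exercised here: without a datetime, get_paz() raises SEEDParserException as soon as more than one response epoch matches the channel, so the time of interest has to be passed explicitly. A hedged sketch (the exception is assumed to live in obspy.io.xseed.utils):

from obspy import UTCDateTime
from obspy.io.xseed import Parser
from obspy.io.xseed.utils import SEEDParserException

parser = Parser()
parser.read("G.SPB.dataless")
try:
    paz = parser.get_paz("G.SPB.00.BHZ")  # ambiguous: several epochs match
except SEEDParserException:
    paz = parser.get_paz("G.SPB.00.BHZ", UTCDateTime("2012-01-01"))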
Example #7
 def test_multiple_continued_station_control_header(self):
     """
     """
     # create a valid blockette 010 with record length 256
     b010 = b"0100042 2.4082008,001~2038,001~2009,001~~~"
     blockette = Blockette010(strict=True, compact=True)
     blockette.parse_seed(b010)
     self.assertEqual(b010, blockette.get_seed())
     # create a valid blockette 054
     b054 = b"0540960A0400300300000039"
     nr = b""
     for i in range(0, 78):
         # 960 chars
         nr = nr + ("+1.000%02dE-03" % i).encode('ascii', 'strict')
     blockette = Blockette054(strict=True, compact=True)
     blockette.parse_seed(b054 + nr)
     self.assertEqual(b054 + nr, blockette.get_seed())
     # create a blockette 051
     b051 = b'05100271999,123~~0001000000'
     blockette = Blockette051(strict=False)
     # ignore user warning
     with warnings.catch_warnings(record=True):
         warnings.simplefilter("ignore")
         blockette.parse_seed(b051)
     # combine data (each line equals 256 chars)
     data = b"000001V " + b010 + (b' ' * 206)
     data += b"000002S " + b054 + nr[0:224]  # 256-8-24 = 224
     data += b"000003S*" + nr[224:472]  # 256-8 = 248
     data += b"000004S*" + nr[472:720]
     data += b"000005S*" + nr[720:] + b051 + b' ' * 5  # 5 spaces left
     self.assertEqual(len(data), 256 * 5)
     data += b"000006S " + b054 + nr[0:224]  # 256-8-24 = 224
     data += b"000007S*" + nr[224:472]  # 256-8 = 248
     data += b"000008S*" + nr[472:720]
     data += b"000009S*" + nr[720:] + b' ' * 32  # 32 spaces left
     self.assertEqual(len(data), 256 * 9)
     # read records
     parser = Parser(strict=False)
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         parser.read(data)
     # check results
     self.assertEqual(sorted(parser.blockettes.keys()), [10, 51, 54])
     self.assertEqual(len(parser.blockettes[10]), 1)
     self.assertEqual(len(parser.blockettes[51]), 1)
     self.assertEqual(len(parser.blockettes[54]), 2)
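A quick sanity check of the record arithmetic used while assembling the raw volume above (256-byte records, an 8-byte sequence/type header such as "000001V ", '*' marking continuation records):

assert 24 + 78 * 12 == 960   # blockette 054: 24-char header + 78 values of 12 chars
assert 8 + 42 + 206 == 256   # record 1: header + blockette 010 + padding
assert 256 - 8 - 24 == 224   # data left after record header and blockette 054 header
assert 256 - 8 == 248        # data per continuation record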
Example #8
    def test_parsing_resp_file_without_clear_blkt_separation(self):
        """
        This is a slightly malformed RESP file that has two blockettes 58 at
        the end. Most RESP files separate blockettes with comments of which
        at least one contains a plus sign. This one does not so additional
        heuristics are needed.
        """
        filename = os.path.join(self.path, '6D6-Trillium-250sps.resp')
        parser = Parser()
        parser.read(filename)
        b = parser.blockettes[58][-1]
        self.assertEqual(b.stage_sequence_number, 0)
        self.assertEqual(b.number_of_history_values, 0)
        np.testing.assert_allclose(b.sensitivity_gain, 8.043400E+10)
        np.testing.assert_allclose(b.frequency, 1.0)

        # Also compare directly against what evalresp would do.
        obs_r = obspy.read_inventory(filename)[0][0][0].response\
            .get_evalresp_response_for_frequencies([0.0, 1.0, 10.0])
        evresp = evalresp_for_frequencies(0.01, [0.0, 1.0, 10.0], filename,
                                          obspy.UTCDateTime(2015, 1, 2))
        np.testing.assert_allclose(obs_r, evresp)
Example #9
 def test_blockette_starts_after_record(self):
     """
     '... 058003504 1.00000E+00 0.00000E+0000 000006S*0543864 ... '
     ' 0543864' -> results in Blockette 005
     """
     # create a valid blockette 010 with record length 256
     b010 = b"0100042 2.4082008,001~2038,001~2009,001~~~"
     blockette = Blockette010(strict=True, compact=True)
     blockette.parse_seed(b010)
     self.assertEqual(b010, blockette.get_seed())
     # create a valid blockette 054
     b054 = b"0540240A0400300300000009" + (b"+1.58748E-03" * 18)
     blockette = Blockette054(strict=True, compact=True)
     blockette.parse_seed(b054)
     self.assertEqual(b054, blockette.get_seed())
     # combine data
     data = b"000001V " + b010 + (b' ' * 206)
     data += b"000002S " + b054 + (b' ' * 8)
     data += b"000003S*" + b054 + (b' ' * 8)
     # read records
     parser = Parser(strict=True)
     parser.read(data)
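The same record arithmetic applies to this synthetic volume: with a 256-byte record length, the 8-byte record header plus the 240-byte blockette 054 leaves exactly 8 bytes of padding, as the construction above shows.

assert 24 + 18 * 12 == 240   # blockette 054: "0540240..." declares 240 chars
assert 8 + 240 + 8 == 256    # record header + blockette 054 + 8 spaces of padding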