def test_evalrespBug395(self):
    """
    Was a bug due to inconsistent numerical range
    """
    resp = os.path.join(self.path, 'RESP.CH._.HHZ.gz')
    fh = NamedTemporaryFile()
    tmpfile = fh.name
    fh.write(gzip.open(resp).read())
    fh.close()
    samprate = 120.0
    nfft = 56328
    args = [1 / samprate, nfft, tmpfile,
            UTCDateTime(2012, 9, 4, 5, 12, 15, 863300)]
    kwargs = {'units': 'VEL', 'freq': True}
    h, f = evalresp(*args, **kwargs)
    self.assertEqual(len(f), nfft // 2 + 1)
    os.unlink(tmpfile)

def test_read_events(self):
    """
    Tests reading a QuakeML document via read_events.
    """
    with NamedTemporaryFile() as tf:
        tmpfile = tf.name
        catalog = read_events(self.neries_filename)
        self.assertEqual(len(catalog), 3)
        catalog.write(tmpfile, format='QUAKEML')
        # Read file again. Avoid the (legit) warning about the already used
        # resource identifiers.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore")
            catalog2 = read_events(tmpfile)
            self.assertEqual(len(catalog2), 3)

def test_raise_on_empty_file(self):
    """
    Test case ensures that empty files are not detected as any waveform
    format.
    """
    with NamedTemporaryFile() as tf:
        tmpfile = tf.name
        # create empty file
        open(tmpfile, 'wb').close()
        formats_ep = _get_default_eps('obspy.plugin.waveform',
                                      'readFormat')
        # using format keyword
        for ep in formats_ep.values():
            is_format = load_entry_point(
                ep.dist.key, 'obspy.plugin.waveform.' + ep.name,
                'isFormat')
            self.assertFalse(is_format(tmpfile))

def test_plotDefaultSection(self):
    """
    Tests plotting 10 traces in a record section
    """
    start = UTCDateTime(0)
    st = Stream()
    for _i in range(10):
        st += self._createStream(start, start + 3600, 100)
        st[-1].stats.distance = _i * 10e3
    # create and compare image
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, type='section')
        # compare images
        expected_image = os.path.join(self.path,
                                      'waveform_default_section.png')
        compare_images(tf.name, expected_image, 0.001)

def test_write_quakeml(self):
    """
    Tests writing a QuakeML document.
    """
    filename = os.path.join(self.path, 'qml-example-1.2-RC3.xml')
    with NamedTemporaryFile() as tf:
        tmpfile = tf.name
        catalog = _read_quakeml(filename)
        self.assertEqual(len(catalog), 1)
        _write_quakeml(catalog, tmpfile, validate=IS_RECENT_LXML)
        # Read file again. Avoid the (legit) warning about the already used
        # resource identifiers.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore")
            catalog2 = _read_quakeml(tmpfile)
            self.assertEqual(len(catalog2), 1)

def test_file_with_no_timing_quality(self):
    """
    Tests timing quality extraction in files with no timing quality.
    """
    with NamedTemporaryFile() as tf1:
        obspy.Trace(
            data=np.arange(10, dtype=np.int32),
            header={"starttime": obspy.UTCDateTime(0)}).write(
                tf1.name, format="mseed")
        mseed_metadata = MSEEDMetadata([tf1.name], add_flags=True)
        ref = mseed_metadata.meta['miniseed_header_percentages']
        self.assertEqual(ref['timing_quality_max'], None)
        self.assertEqual(ref['timing_quality_min'], None)
        self.assertEqual(ref['timing_quality_mean'], None)

def wrapped_func(filename, *args, **kwargs):
    if not isinstance(filename, basestring):
        return func(filename, *args, **kwargs)
    elif not os.path.exists(filename):
        msg = "File not found '%s'" % (filename)
        raise IOError(msg)
    # check if we got a compressed file
    unpacked_data = None
    if filename.endswith('.bz2'):
        # bzip2
        try:
            import bz2
            unpacked_data = bz2.decompress(open(filename, 'rb').read())
        except:
            pass
    elif filename.endswith('.gz'):
        # gzip
        try:
            import gzip
            unpacked_data = gzip.open(filename, 'rb').read()
        except:
            pass
    if unpacked_data:
        # we unpacked something without errors - create temporary file
        tempfile = NamedTemporaryFile()
        tempfile._fileobj.write(unpacked_data)
        tempfile.close()
        # call wrapped function
        try:
            result = func(tempfile.name, *args, **kwargs)
        except:
            # clean up unpacking procedure
            if unpacked_data:
                tempfile.close()
                os.remove(tempfile.name)
            raise
        # clean up unpacking procedure
        if unpacked_data:
            tempfile.close()
            os.remove(tempfile.name)
    else:
        # call wrapped function with original filename
        result = func(filename, *args, **kwargs)
    return result

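# A minimal usage sketch (illustrative, not from the original source):
# wrapped_func above is the inner function of a decompression decorator,
# so a factory along these lines would produce it. The factory name
# `uncompress_file` and the reader `_read_first_line` below are assumptions
# made for this example only.
import functools


def uncompress_file(func):
    """Decorator: transparently unpack .bz2/.gz input before calling func."""
    @functools.wraps(func)
    def wrapped_func(filename, *args, **kwargs):
        # ... decompression body as defined above ...
        return func(filename, *args, **kwargs)
    return wrapped_func


@uncompress_file
def _read_first_line(filename):
    # hypothetical reader; receives a path to the (possibly unpacked) data
    with open(filename, 'rb') as fh:
        return fh.readline()
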
def test_plotSimpleGapFewSamples(self):
    """
    Plots three hours with a gap.

    There are 45 minutes of data at the beginning and 45 minutes of data
    at the end.
    """
    start = UTCDateTime(0)
    st = self._createStream(start, start + 3600 * 3 / 4, 5.0)
    st += self._createStream(start + 2.25 * 3600, start + 3 * 3600, 5.0)
    # create and compare image
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name)
        # compare images
        expected_image = os.path.join(
            self.path, 'waveform_simple_gap_few_samples.png')
        compare_images(tf.name, expected_image, 0.001)

def test_plotOneHourManySamples(self):
    """
    Plots one hour, starting Jan 1970.

    Uses a frequency of 1000 Hz to get a sample count of over 3 million,
    which is in the range where plotting chooses a min/max approach to
    display the data.
    """
    start = UTCDateTime(0)
    st = self._createStream(start, start + 3600, 1000.0)
    # create and compare image
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name)
        # compare images
        expected_image = os.path.join(
            self.path, 'waveform_one_hour_many_samples.png')
        compare_images(tf.name, expected_image, 0.001)

def test_evalrespBug395(self):
    """
    Was a bug due to inconsistent numerical range
    """
    resp = os.path.join(self.path, 'RESP.CH._.HHZ.gz')
    with NamedTemporaryFile() as fh:
        tmpfile = fh.name
        fh.write(gzip.open(resp).read())
        samprate = 120.0
        nfft = 56328
        args = [
            1.0 / samprate, nfft, tmpfile,
            UTCDateTime(2012, 9, 4, 5, 12, 15, 863300)
        ]
        kwargs = {'units': 'VEL', 'freq': True}
        _h, f = evalresp(*args, **kwargs)
        self.assertEqual(len(f), nfft // 2 + 1)

def test_can_add_npz_without_pickle(self):
    """
    Ensure PPSD can be added without using the pickle protocol, or that
    a helpful error message is raised if allow_pickle is required.
    See #2409.
    """
    ppsd = _internal_get_ppsd()
    # save PPSD in such a way as to mock old versions.
    with NamedTemporaryFile(suffix='.npz') as ntemp:
        temp_path = ntemp.name
        self._save_npz_require_pickle(temp_path, ppsd)
        # We should be able to load the files when allowing pickle.
        ppsd.add_npz(temp_path, allow_pickle=True)
        # If not allow_pickle, a helpful error msg should be raised.
        with pytest.raises(ValueError, match='Loading PPSD results'):
            ppsd.add_npz(temp_path)

def test_PPSD_save_and_load_npz(self):
    """
    Test PPSD.load_npz() and PPSD.save_npz()
    """
    _, paz = _get_sample_data()
    ppsd = _get_ppsd()
    # save results to npz file
    with NamedTemporaryFile(suffix=".npz") as tf:
        filename = tf.name
        # test saving and loading an uncompressed file
        ppsd.save_npz(filename)
        ppsd_loaded = PPSD.load_npz(filename, metadata=paz)
        for key in NPZ_STORE_KEYS:
            np.testing.assert_equal(getattr(ppsd, key),
                                    getattr(ppsd_loaded, key))

def test_schema_validation(self):
    with NamedTemporaryFile() as tf:
        obspy.Trace(
            data=np.arange(10, dtype=np.int32),
            header={"starttime": obspy.UTCDateTime(0)}).write(
                tf.name, format="mseed")
        md = MSEEDMetadata(files=[tf.name])
        # One can either directly validate the metrics.
        md.validate_qc_metrics(md.meta)
        # Or do it during the serialization.
        md.get_json_meta(validate=True)
        # Also try with extracting the flags.
        md = MSEEDMetadata(files=[tf.name], add_flags=True)
        md.validate_qc_metrics(md.meta)
        md.get_json_meta(validate=True)

def cmp_file(self, quakeml_file, sc3ml_file, path=None, validate=True,
             event_removal=False):
    """
    Check that the generated SC3ML file is the same as the one in the
    data folder.
    """
    if path is None:
        path = self.quakeml_path
    filename = os.path.join(path, quakeml_file)
    catalog = _read_quakeml(filename)
    with NamedTemporaryFile() as tf:
        tmpfile = tf.name
        catalog.write(tmpfile, format='SC3ML', validate=validate,
                      verbose=True, event_removal=event_removal)
        filepath_cmp = os.path.join(self.path, sc3ml_file)
        self.assertTrue(filecmp.cmp(filepath_cmp, tmpfile))

def test_gap_fire_testing(self):
    """
    Fire tests at a rapid rate to exercise the gap function.

    Rapid gap testing. Create the following stream:

    0 -- 1 -- x -- x -- 4 -- x -- x -- 7 -- 8 -- x -- 10 -- 11 --

    and fire as many strange windows as possible, checking that gaps are
    calculated correctly. Add your own!
    """
    tr_1 = obspy.Trace(data=np.arange(2, dtype=np.int32),
                       header={"starttime": obspy.UTCDateTime(5)})
    tr_2 = obspy.Trace(data=np.arange(1, dtype=np.int32),
                       header={"starttime": obspy.UTCDateTime(9)})
    tr_3 = obspy.Trace(data=np.arange(2, dtype=np.int32),
                       header={"starttime": obspy.UTCDateTime(12)})
    tr_4 = obspy.Trace(data=np.arange(2, dtype=np.int32),
                       header={"starttime": obspy.UTCDateTime(15)})
    st = obspy.Stream(traces=[tr_1, tr_2, tr_3, tr_4])
    with NamedTemporaryFile() as tf:
        st.write(tf.name, format="mseed")

        def _rapid_gap_testing(start, end):
            md = MSEEDMetadata(files=[tf.name],
                               starttime=obspy.UTCDateTime(start),
                               endtime=obspy.UTCDateTime(end))
            return md.meta['sum_gaps']

        self.assertEqual(_rapid_gap_testing(5, 17), 5)
        self.assertEqual(_rapid_gap_testing(5, 10), 2)
        self.assertEqual(_rapid_gap_testing(8.30, 9.5), 0.70)
        self.assertEqual(_rapid_gap_testing(9, 12), 2)
        self.assertEqual(_rapid_gap_testing(12, 17), 1)
        self.assertEqual(_rapid_gap_testing(10, 13), 2)
        self.assertEqual(_rapid_gap_testing(10.25, 13), 1.75)
        self.assertEqual(_rapid_gap_testing(11.75, 17), 1.25)
        self.assertEqual(_rapid_gap_testing(6, 10.5), 2.5)
        self.assertEqual(_rapid_gap_testing(11.99, 12.01), 0.01)
        self.assertEqual(_rapid_gap_testing(10.1, 12.01), 1.9)
        self.assertEqual(_rapid_gap_testing(7.5, 14.25), 3.75)
        self.assertEqual(_rapid_gap_testing(5, 17.5), 5.5)
        self.assertEqual(_rapid_gap_testing(5, 17.6), 5.6)
        self.assertEqual(_rapid_gap_testing(5, 18), 6)
        self.assertEqual(_rapid_gap_testing(0, 5.01), 5)
        self.assertEqual(_rapid_gap_testing(0, 20), 13)

def test_write_preferred_origin(self):
    event = full_test_event()
    preferred_origin = Origin(time=UTCDateTime("2012-03-26") + 2.2,
                              latitude=47.0, longitude=35.0, depth=18000)
    event.origins.append(preferred_origin)
    event.preferred_origin_id = preferred_origin.resource_id
    with NamedTemporaryFile(suffix=".out") as tf:
        event.write(tf.name, format="NORDIC")
        event_back = read_events(tf.name)
        self.assertEqual(preferred_origin.latitude,
                         event_back[0].origins[0].latitude)
        self.assertEqual(preferred_origin.longitude,
                         event_back[0].origins[0].longitude)
        self.assertEqual(preferred_origin.depth,
                         event_back[0].origins[0].depth)
        self.assertEqual(preferred_origin.time,
                         event_back[0].origins[0].time)

def cmp_write_xslt_file(self, quakeml_file, sc3ml_file, validate=True,
                        path=None):
    """
    Check that the SC3ML file generated with the XSLT file is the same as
    the one in the data folder.
    """
    if path is None:
        path = self.path
    transform = etree.XSLT(etree.parse(self.write_xslt_filename))
    filename = os.path.join(path, quakeml_file)
    sc3ml_doc = transform(etree.parse(filename))
    with NamedTemporaryFile() as tf:
        tf.write(sc3ml_doc)
        if validate:
            self.assertTrue(validate_sc3ml(tf.name))
        filepath_cmp = os.path.join(self.path, sc3ml_file)
        self.assertTrue(filecmp.cmp(filepath_cmp, tf.name))

def test_writeQuakeML(self):
    """
    Tests writing a QuakeML document.

    skipIfPython25 due to the use of the warnings context manager.
    """
    filename = os.path.join(self.path, 'qml-example-1.2-RC3.xml')
    tmpfile = NamedTemporaryFile().name
    catalog = readQuakeML(filename)
    self.assertEqual(len(catalog), 1)
    writeQuakeML(catalog, tmpfile)
    # Read file again. Avoid the (legit) warning about the already used
    # resource identifiers.
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("ignore")
        catalog2 = readQuakeML(tmpfile)
        self.assertEqual(len(catalog2), 1)
    # clean up
    os.remove(tmpfile)

def test_readEvents(self):
    """
    Tests reading a QuakeML document via readEvents.

    skipIfPython25 due to the use of the warnings context manager.
    """
    filename = os.path.join(self.path, 'neries_events.xml')
    tmpfile = NamedTemporaryFile().name
    catalog = readEvents(filename)
    self.assertEqual(len(catalog), 3)
    catalog.write(tmpfile, format='QUAKEML')
    # Read file again. Avoid the (legit) warning about the already used
    # resource identifiers.
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("ignore")
        catalog2 = readEvents(tmpfile)
        self.assertEqual(len(catalog2), 3)
    # clean up
    os.remove(tmpfile)

def test_read_and_write_scardec_from_open_files(self):
    """
    Tests that reading and writing a SCARDEC file does not change
    anything.

    This time it tests reading from and writing to open files.
    """
    filename = os.path.join(self.datapath, "test.scardec")
    with open(filename, "rb") as fh:
        data = fh.read()
        fh.seek(0, 0)
        cat = obspy.read_events(fh)
    with NamedTemporaryFile() as tf:
        # raises two UserWarnings
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', UserWarning)
            cat.write(tf, format="SCARDEC")
            tf.seek(0, 0)
            new_data = tf.read()
        self.assertEqual(len(w), 2)
        self.assertEqual(w[0].category, UserWarning)
        self.assertIn('No moment wave magnitude found', str(w[0]))
        self.assertEqual(w[1].category, UserWarning)
        self.assertIn('No derived origin attached', str(w[1]))
    # Test file header
    self.assertEqual(data.decode().splitlines()[0:2],
                     new_data.decode().splitlines()[0:2])
    for line_data, line_new in zip(data.decode().splitlines()[2:],
                                   new_data.decode().splitlines()[2:]):
        # Compare time stamps
        self.assertTrue(np.allclose(float(line_data.split()[0]),
                                    float(line_new.split()[0])))
        # Compare moment rate values
        self.assertTrue(np.allclose(float(line_data.split()[1]),
                                    float(line_new.split()[1])))

def test_IRIS_example_queries_event(self):
    """
    Tests the (sometimes modified) example queries given on the IRIS
    web page.
    """
    client = self.client
    queries = [
        dict(eventid=609301),
        dict(starttime=UTCDateTime("2001-01-07T01:00:00"),
             endtime=UTCDateTime("2001-01-07T01:05:00"),
             catalog="ISC"),
        dict(starttime=UTCDateTime("2001-01-07T14:00:00"),
             endtime=UTCDateTime("2001-01-08T00:00:00"),
             minlatitude=15, maxlatitude=40,
             minlongitude=-170, maxlongitude=170,
             includeallmagnitudes=True, minmagnitude=4,
             orderby="magnitude"),
    ]
    result_files = [
        "events_by_eventid.xml",
        "events_by_time.xml",
        "events_by_misc.xml",
    ]
    for query, filename in zip(queries, result_files):
        file_ = os.path.join(self.datapath, filename)
        # query["filename"] = file_
        got = client.get_events(**query)
        expected = read_events(file_)
        self.assertEqual(got, expected, failmsg(got, expected))
        # test output to file
        with NamedTemporaryFile() as tf:
            client.get_events(filename=tf.name, **query)
            with open(tf.name, 'rb') as fh:
                got = fh.read()
            with open(file_, 'rb') as fh:
                expected = fh.read()
        self.assertEqual(got, expected,
                         filename + '\n' + failmsg(got, expected))

def test_read_and_write_scardec_from_files(self):
    """
    Tests that reading and writing a SCARDEC file does not change
    anything.

    Note: The test file is not one from the catalogue, since it was
    impossible to recreate the number formatting. Therefore, the test
    file has been created with ObsPy, but was manually checked to be
    consistent with the original file.
    """
    filename = os.path.join(self.datapath, "test.scardec")
    with open(filename, "rb") as fh:
        data = fh.read()
    cat = obspy.read_events(filename)
    with NamedTemporaryFile() as tf:
        temp_filename = tf.name
    try:
        cat.write(temp_filename, format="SCARDEC")
        with open(temp_filename, "rb") as fh:
            new_data = fh.read()
    finally:
        try:
            os.remove(temp_filename)
        except Exception:
            pass
    # Test file header
    self.assertEqual(data.decode().splitlines()[0:2],
                     new_data.decode().splitlines()[0:2])
    for line_data, line_new in zip(data.decode().splitlines()[2:],
                                   new_data.decode().splitlines()[2:]):
        # Compare time stamps
        self.assertTrue(np.allclose(float(line_data.split()[0]),
                                    float(line_new.split()[0])))
        # Compare moment rate values
        self.assertTrue(np.allclose(float(line_data.split()[1]),
                                    float(line_new.split()[1])))

def test_read_and_write_cmtsolution_from_open_files(self):
    """
    Tests that reading and writing a CMTSOLUTION file does not change
    anything.

    This time it tests reading from and writing to open files.
    """
    filename = os.path.join(self.datapath, "CMTSOLUTION")
    with open(filename, "rb") as fh:
        data = fh.read()
        fh.seek(0, 0)
        cat = obspy.read_events(fh)
    with NamedTemporaryFile() as tf:
        cat.write(tf, format="CMTSOLUTION")
        tf.seek(0, 0)
        new_data = tf.read()
    self.assertEqual(data.decode().splitlines(),
                     new_data.decode().splitlines())

def test_plotMultipleTraces(self):
    """
    Plots multiple traces underneath.
    """
    # 1 trace
    st = read()[0]
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, automerge=False)
        expected_image = os.path.join(self.path, 'waveform_1_trace.png')
        compare_images(tf.name, expected_image, 0.001)
    # 3 traces
    st = read()
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, automerge=False)
        expected_image = os.path.join(self.path, 'waveform_3_traces.png')
        compare_images(tf.name, expected_image, 0.001)
    # 5 traces
    st = st[1] * 5
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, automerge=False)
        expected_image = os.path.join(self.path, 'waveform_5_traces.png')
        compare_images(tf.name, expected_image, 0.001)
    # 10 traces
    st = st[1] * 10
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, automerge=False)
        expected_image = os.path.join(self.path, 'waveform_10_traces.png')
        compare_images(tf.name, expected_image, 0.001)
    # 10 traces - huge numbers
    st = st[1] * 10
    for i, tr in enumerate(st):
        # scale data to have huge numbers
        st[i].data = tr.data * 10 ** i
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, automerge=False, equal_scale=False)
        expected_image = os.path.join(self.path,
                                      'waveform_10_traces_huge.png')
        compare_images(tf.name, expected_image, 0.001)
    # 10 traces - tiny numbers
    st = st[1] * 10
    for i, tr in enumerate(st):
        # scale data to have tiny numbers
        st[i].data = tr.data / (10 ** i)
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, automerge=False, equal_scale=False)
        expected_image = os.path.join(self.path,
                                      'waveform_10_traces_tiny.png')
        compare_images(tf.name, expected_image, 0.001)

def test_ppsd_save_and_load_npz(self):
    """
    Test PPSD.load_npz() and PPSD.save_npz()
    """
    _, paz = _get_sample_data()
    ppsd = _get_ppsd()
    # save results to npz file
    with NamedTemporaryFile(suffix=".npz") as tf:
        filename = tf.name
        # test saving and loading an uncompressed file
        ppsd.save_npz(filename)
        ppsd_loaded = PPSD.load_npz(filename, metadata=paz)
        for key in PPSD.NPZ_STORE_KEYS:
            if isinstance(getattr(ppsd, key), np.ndarray) or \
                    key == '_binned_psds':
                np.testing.assert_equal(getattr(ppsd, key),
                                        getattr(ppsd_loaded, key))
            else:
                self.assertEqual(getattr(ppsd, key),
                                 getattr(ppsd_loaded, key))

def test_more_than_three_mags(self):
    cat = Catalog()
    cat += full_test_event()
    cat[0].magnitudes.append(
        Magnitude(mag=0.9, magnitude_type='MS',
                  creation_info=CreationInfo('TES'),
                  origin_id=cat[0].origins[0].resource_id))
    with NamedTemporaryFile(suffix='.out') as tf:
        # raises UserWarning: mb is not convertible
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            cat.write(tf.name, format='nordic')
        # raises "UserWarning: AIN in header, currently unsupported"
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            cat_back = read_events(tf.name)
        for event_1, event_2 in zip(cat, cat_back):
            self.assertEqual(len(event_1.magnitudes),
                             len(event_2.magnitudes))
            _assert_similarity(event_1, event_2)

def test_overlap_fire_testing(self):
    """
    Fire tests at a rapid rate to exercise the overlap function.

    Rapid overlap testing. Create the following stream:

    0 -- 1 -- 2 -- 3 -- 4 -- 5 -- 6
              3 -- 4 -- 5 -- 6 -- 7 -- 8 -- 9 -- 10
                                  8 -- 9 -- 10 -- 11

    and fire as many strange windows as possible, checking that overlaps
    are calculated correctly. Add your own!
    """
    tr_1 = obspy.Trace(data=np.arange(7, dtype=np.int32),
                       header={"starttime": obspy.UTCDateTime(0)})
    tr_2 = obspy.Trace(data=np.arange(8, dtype=np.int32),
                       header={"starttime": obspy.UTCDateTime(3)})
    tr_3 = obspy.Trace(data=np.arange(4, dtype=np.int32),
                       header={"starttime": obspy.UTCDateTime(7.5)})
    st = obspy.Stream(traces=[tr_1, tr_2, tr_3])
    with NamedTemporaryFile() as tf:
        st.write(tf.name, format="mseed")

        # Supplementary function to test overlaps rapidly with varying
        # start and end times
        def _rapid_overlap_testing(start, end):
            md = MSEEDMetadata(files=[tf.name],
                               starttime=obspy.UTCDateTime(start),
                               endtime=obspy.UTCDateTime(end))
            return md.meta['sum_overlaps']

        self.assertEqual(_rapid_overlap_testing(0, 12), 7.5)
        self.assertEqual(_rapid_overlap_testing(3, 7), 4.0)
        self.assertEqual(_rapid_overlap_testing(3, 5.5), 2.5)
        self.assertEqual(_rapid_overlap_testing(4.5, 5.5), 1.0)
        self.assertEqual(_rapid_overlap_testing(2, 5.25), 2.25)
        self.assertEqual(_rapid_overlap_testing(2, 3), 0.0)
        self.assertEqual(_rapid_overlap_testing(2, 3.1), 0.1)
        self.assertEqual(_rapid_overlap_testing(7, 9), 1.5)
        self.assertEqual(_rapid_overlap_testing(6.9, 9), 1.6)
        self.assertEqual(_rapid_overlap_testing(4.30, 9), 4.2)
        self.assertEqual(_rapid_overlap_testing(5.20, 9000), 5.3)

def test_write_amplitude_time_window(self):
    """
    Tests writing a QuakeML Amplitude with TimeWindow.
    """
    filename = os.path.join(self.path, "qml-example-1.2-RC3.xml")
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        cat = _read_quakeml(filename)
        self.assertEqual(len(w), 0)
    with NamedTemporaryFile() as tf:
        tmpfile = tf.name
        cat.write(tmpfile, format='QUAKEML')
        with open(tmpfile, "rb") as fh:
            lines = fh.readlines()
        firstline = 45
        while b"<amplitude " not in lines[firstline]:
            firstline += 1
        got = [lines[i_].strip()
               for i_ in range(firstline, firstline + 13)]
        expected = [
            b'<amplitude publicID="smi:nz.org.geonet/event/2806038g/'
            b'amplitude/1/modified">',
            b'<genericAmplitude>',
            b'<value>1e-08</value>',
            b'</genericAmplitude>',
            b'<type>A</type>',
            b'<category>point</category>',
            b'<unit>m/s</unit>',
            b'<timeWindow>',
            b'<reference>2007-10-10T14:40:39.055000Z</reference>',
            b'<begin>0.0</begin>',
            b'<end>0.51424</end>',
            b'</timeWindow>',
            b'</amplitude>']
        self.assertEqual(got, expected)

def test_plotAzimSection(self):
    """
    Tests plotting 10 traces in an azimuthal distance section
    """
    start = UTCDateTime(0)
    st = Stream()
    for _i in range(10):
        st += self._createStream(start, start + 3600, 100)
        st[-1].stats.coordinates = AttribDict({
            'latitude': _i,
            'longitude': _i})
    # create and compare image
    with NamedTemporaryFile(suffix='.png') as tf:
        st.plot(outfile=tf.name, type='section', dist_degree=True,
                ev_coord=(0.0, 0.0))
        # compare images
        expected_image = os.path.join(self.path,
                                      'waveform_azim_section.png')
        compare_images(tf.name, expected_image, 0.001)

def test_write_with_extra_tags_namespace_redef(self):
    """
    Tests that exceptions are raised when namespaces are redefined.
    """
    filename = os.path.join(
        self.data_dir, "stationxml_with_availability.xml")
    # read the StationXML with availability
    inv = obspy.read_inventory(filename)
    with NamedTemporaryFile() as tf:
        # manually add custom namespace definition
        tmpfile = tf.name
        # assert that namespace prefix of xsi raises ValueError
        mynsmap = {'xsi': 'http://bad.custom.ns/'}
        self.assertRaises(
            ValueError, inv.write, path_or_file_object=tmpfile,
            format="STATIONXML", nsmap=mynsmap)
        # assert that namespace prefix of None raises ValueError
        mynsmap = {None: 'http://bad.custom.ns/'}
        self.assertRaises(
            ValueError, inv.write, path_or_file_object=tmpfile,
            format="STATIONXML", nsmap=mynsmap)

def test_deepcopy(self):
    """
    Test for issue #689: deepcopy did not work for SEGY.

    In order to avoid complicated code to find test data for each waveform
    plugin which reads OK and has no errors, we simply test by first
    writing the waveform and then reading it in. The test is thus limited
    to formats which we can also write.
    """
    # find all plugins with both read and write method
    formats_write = \
        set(_get_default_eps('obspy.plugin.waveform', 'writeFormat'))
    formats_read = \
        set(_get_default_eps('obspy.plugin.waveform', 'readFormat'))
    formats = set.intersection(formats_write, formats_read)
    stream_orig = read()
    for format in formats:
        # TODO: these formats error in read and writing, not in
        # deepcopy
        if format in ('SAC', 'SACXY', 'SEG2', 'Q', 'WAV'):
            continue
        stream = deepcopy(stream_orig)
        # set some data
        dt = np.float32
        if format in ('GSE2', 'MSEED'):
            dt = np.int32
        for tr in stream:
            tr.data = np.arange(tr.stats.npts).astype(dt)
        with NamedTemporaryFile() as tf:
            tmpfile = tf.name
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                stream.write(format=format, filename=tmpfile)
            st = read(tmpfile, format=format)
        st.sort()
        st_deepcopy = deepcopy(st)
        st_deepcopy.sort()
        msg = "Error in waveform format=%s" % format
        self.assertEqual(str(st), str(st_deepcopy), msg=msg)

def evalresp(t_samp, nfft, filename, date, station='*', channel='*',
             network='*', locid='*', units="VEL", start_stage=-1,
             stop_stage=0, freq=False, debug=False):
    """
    Use the evalresp library to extract instrument response information
    from a SEED RESP-file. The start and stop stages can be specified to
    restrict the response to a subset of the instrument's stages.

    :type t_samp: float
    :param t_samp: Sampling interval in seconds
    :type nfft: int
    :param nfft: Number of FFT points of signal which needs correction
    :type filename: str (or open file like object)
    :param filename: SEED RESP-filename or open file like object with RESP
        information. Any object that provides a read() method will be
        considered to be a file like object.
    :type date: UTCDateTime
    :param date: Date of interest
    :type station: str
    :param station: Station id
    :type channel: str
    :param channel: Channel id
    :type network: str
    :param network: Network id
    :type locid: str
    :param locid: Location id
    :type units: str
    :param units: Units to return response in. Can be either DIS, VEL
        or ACC
    :type start_stage: int
    :param start_stage: integer stage number of the start stage (<0 causes
        default evalresp behaviour).
    :type stop_stage: int
    :param stop_stage: integer stage number of the stop stage
    :type debug: bool
    :param debug: Verbose output to stdout. Disabled by default.
    :rtype: numpy.ndarray complex128
    :return: Frequency response from SEED RESP-file of length nfft
    """
    if isinstance(filename, basestring):
        with open(filename, 'rb') as fh:
            data = fh.read()
    elif hasattr(filename, 'read'):
        data = filename.read()
    # evalresp needs files with correct line separators depending on OS
    fh = NamedTemporaryFile()
    tempfile = fh.name
    fh.write(os.linesep.join(data.splitlines()))
    fh.close()
    fy = 1 / (t_samp * 2.0)
    # start at zero to get zero for offset / DC of fft
    freqs = np.linspace(0, fy, nfft // 2 + 1)
    start_stage_c = C.c_int(start_stage)
    stop_stage_c = C.c_int(stop_stage)
    stdio_flag = C.c_int(0)
    sta = C.create_string_buffer(station)
    cha = C.create_string_buffer(channel)
    net = C.create_string_buffer(network)
    locid = C.create_string_buffer(locid)
    unts = C.create_string_buffer(units)
    if debug:
        vbs = C.create_string_buffer("-v")
    else:
        vbs = C.create_string_buffer("")
    rtyp = C.create_string_buffer("CS")
    datime = C.create_string_buffer(date.formatSEED())
    fn = C.create_string_buffer(tempfile)
    nfreqs = C.c_int(freqs.shape[0])
    res = clibevresp.evresp(sta, cha, net, locid, datime, unts, fn,
                            freqs, nfreqs, rtyp, vbs, start_stage_c,
                            stop_stage_c, stdio_flag, C.c_int(0))
    # optimizing performance, see
    # http://wiki.python.org/moin/PythonSpeed/PerformanceTips
    nfreqs, rfreqs, rvec = res[0].nfreqs, res[0].freqs, res[0].rvec
    h = np.empty(nfreqs, dtype='complex128')
    f = np.empty(nfreqs, dtype='float64')
    for i in xrange(nfreqs):
        h[i] = rvec[i].real + rvec[i].imag * 1j
        f[i] = rfreqs[i]
    clibevresp.free_response(res)
    del nfreqs, rfreqs, rvec, res
    if freq:
        return h, f
    return h

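# A short usage sketch for evalresp() above; the parameters mirror the
# test_evalrespBug395 test earlier in this section, and the RESP filename
# is hypothetical. With freq=True, both the complex response and the
# corresponding frequency vector are returned.
samprate = 120.0
nfft = 56328
h, f = evalresp(1.0 / samprate, nfft, 'RESP.CH._.HHZ',
                UTCDateTime(2012, 9, 4, 5, 12, 15, 863300),
                units='VEL', freq=True)
assert len(f) == nfft // 2 + 1  # one value per FFT bin up to Nyquist
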
def evalresp(t_samp, nfft, filename, date, station='*', channel='*',
             network='*', locid='*', units="VEL", freq=False,
             debug=False):
    """
    Use the evalresp library to extract instrument response information
    from a SEED RESP-file.

    :type t_samp: float
    :param t_samp: Sampling interval in seconds
    :type nfft: int
    :param nfft: Number of FFT points of signal which needs correction
    :type filename: str
    :param filename: SEED RESP-filename or content of RESP file
    :type date: UTCDateTime
    :param date: Date of interest
    :type station: str
    :param station: Station id
    :type channel: str
    :param channel: Channel id
    :type network: str
    :param network: Network id
    :type locid: str
    :param locid: Location id
    :type units: str
    :param units: Units to return response in. Can be either DIS, VEL
        or ACC
    :type debug: bool
    :param debug: Verbose output to stdout. Disabled by default.
    :rtype: numpy.ndarray complex128
    :return: Frequency response from SEED RESP-file of length nfft
    """
    # evalresp needs files with correct line separators depending on OS
    data = open(filename, 'rb').read()
    fh = NamedTemporaryFile()
    tempfile = fh.name
    fh.write(os.linesep.join(data.splitlines()))
    fh.close()
    fy = 1 / (t_samp * 2.0)
    # start at zero to get zero for offset / DC of fft
    freqs = np.linspace(0, fy, nfft // 2 + 1)
    start_stage = C.c_int(-1)
    stop_stage = C.c_int(0)
    stdio_flag = C.c_int(0)
    sta = C.create_string_buffer(station)
    cha = C.create_string_buffer(channel)
    net = C.create_string_buffer(network)
    locid = C.create_string_buffer(locid)
    unts = C.create_string_buffer(units)
    if debug:
        vbs = C.create_string_buffer("-v")
    else:
        vbs = C.create_string_buffer("")
    rtyp = C.create_string_buffer("CS")
    datime = C.create_string_buffer("%d,%3d" % (date.year, date.julday))
    fn = C.create_string_buffer(tempfile)
    nfreqs = C.c_int(freqs.shape[0])
    res = clibevresp.evresp(sta, cha, net, locid, datime, unts, fn,
                            freqs, nfreqs, rtyp, vbs, start_stage,
                            stop_stage, stdio_flag, C.c_int(0))
    # optimizing performance, see
    # http://wiki.python.org/moin/PythonSpeed/PerformanceTips
    nfreqs, rfreqs, rvec = res[0].nfreqs, res[0].freqs, res[0].rvec
    h = np.empty(nfreqs, dtype='complex128')
    f = np.empty(nfreqs, dtype='float64')
    for i in xrange(nfreqs):
        h[i] = rvec[i].real + rvec[i].imag * 1j
        f[i] = rfreqs[i]
    clibevresp.free_response(res)
    del nfreqs, rfreqs, rvec, res
    # delete temporary file
    try:
        os.remove(tempfile)
    except:
        pass
    if freq:
        return h, f
    return h
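
# Note on the date handling difference between the two evalresp variants:
# this older variant builds the evalresp date string inline as
# "%d,%3d" % (date.year, date.julday), i.e. year plus Julian day
# (e.g. UTCDateTime(2012, 9, 4) -> "2012,248"), whereas the variant above
# delegates the formatting to date.formatSEED(), which also carries the
# time of day.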