Example #1
    def test_ppsd_add_npz(self):
        """
        Test PPSD.add_npz().
        """
        # set up a bogus PPSD with fixed (seeded) random PSDs but real start
        # times of the PSD pieces, to facilitate testing the stack selection.
        ppsd = PPSD(stats=Stats(dict(sampling_rate=150)),
                    metadata=None,
                    db_bins=(-200, -50, 20.),
                    period_step_octaves=1.4)
        _times_processed = np.load(
            os.path.join(self.path, "ppsd_times_processed.npy")).tolist()
        # convert times to the nanosecond POSIX timestamps used nowadays
        _times_processed = [UTCDateTime(t)._ns for t in _times_processed]
        np.random.seed(1234)
        _binned_psds = [
            arr for arr in np.random.uniform(-200, -50, (
                len(_times_processed), len(ppsd.period_bin_centers)))
        ]

        with NamedTemporaryFile(suffix=".npz") as tf1, \
                NamedTemporaryFile(suffix=".npz") as tf2, \
                NamedTemporaryFile(suffix=".npz") as tf3:
            # save data split up over three separate temporary files
            ppsd._times_processed = _times_processed[:200]
            ppsd._binned_psds = _binned_psds[:200]
            ppsd.save_npz(tf1.name)
            ppsd._times_processed = _times_processed[200:400]
            ppsd._binned_psds = _binned_psds[200:400]
            ppsd.save_npz(tf2.name)
            ppsd._times_processed = _times_processed[400:]
            ppsd._binned_psds = _binned_psds[400:]
            ppsd.matplotlib_version = "X.X.X"
            ppsd.save_npz(tf3.name)
            # now load these saved npz files and check if all data is present
            ppsd = PPSD.load_npz(tf1.name, metadata=None)
            ppsd.add_npz(tf2.name)
            # we changed a version number so this should emit a warning
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                ppsd.add_npz(tf3.name)
                self.assertEqual(len(w), 1)
            np.testing.assert_array_equal(_binned_psds, ppsd._binned_psds)
            np.testing.assert_array_equal(_times_processed,
                                          ppsd._times_processed)
            # adding data already present should also emit a warning and the
            # PPSD should not be changed
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                ppsd.add_npz(tf2.name)
                self.assertEqual(len(w), 1)
            np.testing.assert_array_equal(_binned_psds, ppsd._binned_psds)
            np.testing.assert_array_equal(_times_processed,
                                          ppsd._times_processed)
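The pattern exercised above boils down to saving PPSD pieces with save_npz() and merging them back with add_npz(). A minimal sketch of that round trip, assuming the pieces were saved earlier (the file names are hypothetical; metadata=None matches the bogus setup in the test):

from obspy.signal import PPSD

# start from the first saved piece, then merge the remaining ones into it
ppsd = PPSD.load_npz("piece1.npz", metadata=None)
ppsd.add_npz("piece2.npz")
ppsd.save_npz("combined.npz")  # persist the merged result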
Example #2
 def test_dataselect_bulk(self):
     """
      Test bulk dataselect requests, POSTing data to the server. Also tests
      an authenticated bulk request.
     """
     clients = [self.client, self.client_auth]
     file = os.path.join(self.datapath, "bulk.mseed")
     expected = read(file)
     # test cases for providing lists of lists
     bulk = (("TA", "A25A", "", "BHZ", UTCDateTime("2010-03-25T00:00:00"),
              UTCDateTime("2010-03-25T00:00:04")),
             ("TA", "A25A", "", "BHE", UTCDateTime("2010-03-25T00:00:00"),
              UTCDateTime("2010-03-25T00:00:06")),
             ("IU", "ANMO", "*", "HHZ", UTCDateTime("2010-03-25T00:00:00"),
              UTCDateTime("2010-03-25T00:00:08")))
     params = dict(quality="B", longestonly=False, minimumlength=5)
     for client in clients:
         # test output to stream
         got = client.get_waveforms_bulk(bulk, **params)
         self.assertEqual(got, expected, failmsg(got, expected))
         # test output to file
         with NamedTemporaryFile() as tf:
             client.get_waveforms_bulk(bulk, filename=tf.name, **params)
             got = read(tf.name)
         self.assertEqual(got, expected, failmsg(got, expected))
     # test cases for providing a request string
     bulk = ("quality=B\n"
             "longestonly=false\n"
             "minimumlength=5\n"
             "TA A25A -- BHZ 2010-03-25T00:00:00 2010-03-25T00:00:04\n"
             "TA A25A -- BHE 2010-03-25T00:00:00 2010-03-25T00:00:06\n"
             "IU ANMO * HHZ 2010-03-25T00:00:00 2010-03-25T00:00:08\n")
     for client in clients:
         # test output to stream
         got = client.get_waveforms_bulk(bulk)
         self.assertEqual(got, expected, failmsg(got, expected))
         # test output to file
         with NamedTemporaryFile() as tf:
             client.get_waveforms_bulk(bulk, filename=tf.name)
             got = read(tf.name)
         self.assertEqual(got, expected, failmsg(got, expected))
     # test cases for providing a file name
     for client in clients:
         with NamedTemporaryFile() as tf:
             with open(tf.name, "wt") as fh:
                 fh.write(bulk)
          got = client.get_waveforms_bulk(tf.name)
         self.assertEqual(got, expected, failmsg(got, expected))
     # test cases for providing a file-like object
     for client in clients:
         got = client.get_waveforms_bulk(io.StringIO(bulk))
         self.assertEqual(got, expected, failmsg(got, expected))
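As the test demonstrates, get_waveforms_bulk() accepts the same request in several interchangeable forms. A compact sketch (the "IRIS" endpoint and the time window are illustrative choices, not part of the test above):

import io
from obspy import UTCDateTime
from obspy.clients.fdsn import Client

client = Client("IRIS")  # illustrative FDSN endpoint
t1 = UTCDateTime("2010-03-25T00:00:00")
t2 = UTCDateTime("2010-03-25T00:00:04")
bulk_seq = [("TA", "A25A", "", "BHZ", t1, t2)]  # sequence of tuples
bulk_str = "TA A25A -- BHZ 2010-03-25T00:00:00 2010-03-25T00:00:04\n"
st = client.get_waveforms_bulk(bulk_seq)               # from a sequence
st = client.get_waveforms_bulk(bulk_str)               # from a request string
st = client.get_waveforms_bulk(io.StringIO(bulk_str))  # from a file-like object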
Example #3
    def test_multiple_sampling_rates(self, image_path):
        """
        Check for multiple sampling rates
        """
        lines = [
            "TIMESERIES XX_TEST__BHZ_R, 200 samples, 200 sps, "
            "2008-01-15T00:00:00.000000, SLIST, INTEGER, Counts",
            "TIMESERIES XX_TEST__BHZ_R,  50 samples,  50 sps, "
            "2008-01-15T00:00:00.900000, SLIST, INTEGER, Counts",
            "TIMESERIES XX_TEST__BHZ_R, 200 samples, 200 sps, "
            "2008-01-15T00:00:02.000000, SLIST, INTEGER, Counts",
        ]
        files = []
        expected = [
            "XX.TEST..BHZ 2008-01-15T00:00:01.000000Z "
            "2008-01-15T00:00:00.899995Z -0.100",
            "XX.TEST..BHZ 2008-01-15T00:00:01.899999Z "
            "2008-01-15T00:00:02.000000Z 0.100"
        ]
        with NamedTemporaryFile() as f1, NamedTemporaryFile() as f2, \
                NamedTemporaryFile() as f3:
            for i, fp in enumerate([f1, f2, f3]):
                fp.write(("%s\n" % lines[i]).encode('ascii', 'strict'))
                fp.flush()
                fp.seek(0)
                files.append(fp.name)

            # make image comparison instance and set manual rms (see #2089)

            with CatchOutput() as out:
                cmds = ['--output', str(image_path), '--print-gaps']
                obspy_scan(files + cmds)

            # read output and compare with expected
            # check only that datetime values are close, not exactly equal
            output = out.stdout.splitlines()
            for ex_line, out_line in zip(expected, output):
                ex_split = ex_line.split(' ')
                out_split = out_line.split(' ')
                for ex_str, out_str in zip(ex_split, out_split):
                    try:
                        utc1 = UTCDateTime(ex_str)
                        utc2 = UTCDateTime(out_str)
                    except (ValueError, TypeError):
                        # if str is not a datetime it should be equal
                        assert ex_str == out_str
                    else:
                        # datetimes just need to be close
                        t1, t2 = utc1.timestamp, utc2.timestamp
                        assert abs(t1 - t2) < .001
Example #4
    def test_continuous_segments_combined(self):
        """
        Test continuous segments from traces in two files that together form
        one continuous segment. Also test a trace that is contiguous in time
        but starts a new segment because its sampling rate differs.
        """
        tr_1 = obspy.Trace(data=np.arange(10, dtype=np.int32),
                           header={"starttime": obspy.UTCDateTime(0)})
        tr_2 = obspy.Trace(data=np.arange(10, dtype=np.int32),
                           header={"starttime": obspy.UTCDateTime(10)})
        tr_3 = obspy.Trace(data=np.arange(10, dtype=np.int32),
                           header={
                               "starttime": obspy.UTCDateTime(20),
                               "sampling_rate": 0.5
                           })
        st = obspy.Stream(traces=[tr_1, tr_3])
        st2 = obspy.Stream(traces=[tr_2])
        with NamedTemporaryFile() as tf1, NamedTemporaryFile() as tf2:

            st.write(tf1.name, format="mseed")
            st2.write(tf2.name, format="mseed")
            md = MSEEDMetadata(files=[tf1.name, tf2.name])
            c_seg = md.meta["c_segments"]
            self.assertEqual(len(c_seg), 2)

            c = c_seg[0]
            self.assertEqual(c["start_time"], obspy.UTCDateTime(0))
            self.assertEqual(c["end_time"], obspy.UTCDateTime(20))
            self.assertEqual(c["segment_length"], 20)
            self.assertEqual(c["sample_min"], 0)
            self.assertEqual(c["sample_max"], 9)
            self.assertEqual(c["num_samples"], 20)
            self.assertEqual(c["sample_median"], 4.5)
            self.assertEqual(c["sample_lower_quartile"], 2.0)
            self.assertEqual(c["sample_upper_quartile"], 7.0)
            self.assertEqual(c["sample_rate"], 1.0)

            # Not continuous because of different sampling_rate (0.5)
            c = c_seg[1]
            self.assertEqual(c["start_time"], obspy.UTCDateTime(20))
            self.assertEqual(c["end_time"], obspy.UTCDateTime(40))
            self.assertEqual(c["segment_length"], 20)
            self.assertEqual(c["sample_min"], 0)
            self.assertEqual(c["sample_max"], 9)
            self.assertEqual(c["num_samples"], 10)
            self.assertEqual(c["sample_median"], 4.5)
            self.assertEqual(c["sample_lower_quartile"], 2.25)
            self.assertEqual(c["sample_upper_quartile"], 6.75)
            self.assertEqual(c["sample_rate"], 0.5)
Example #5
    def test_write_amplitude_time_window(self):
        """
        Tests writing a QuakeML Amplitude with a TimeWindow.
        """
        filename = os.path.join(self.path, "qml-example-1.2-RC3.xml")

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            cat = _read_quakeml(filename)
            self.assertEqual(len(w), 0)

        with NamedTemporaryFile() as tf:
            tmpfile = tf.name
            cat.write(tmpfile, format='QUAKEML')
            with open(tmpfile, "rb") as fh:
                lines = fh.readlines()

            firstline = 45
            while b"<amplitude " not in lines[firstline]:
                firstline += 1

            got = [
                lines[i_].strip() for i_ in range(firstline, firstline + 13)
            ]
            expected = [
                b'<amplitude publicID="smi:nz.org.geonet/event/2806038g/'
                b'amplitude/1/modified">', b'<genericAmplitude>',
                b'<value>1e-08</value>', b'</genericAmplitude>',
                b'<type>A</type>', b'<category>point</category>',
                b'<unit>m/s</unit>', b'<timeWindow>',
                b'<reference>2007-10-10T14:40:39.055000Z</reference>',
                b'<begin>0.0</begin>', b'<end>0.51424</end>', b'</timeWindow>',
                b'</amplitude>'
            ]
            self.assertEqual(got, expected)
Example #6
    def test_write_no_preferred_focal_mechanism(self):
        """
        Tests that writing a CMTSOLUTION file with no preferred (but at least
        one) focal mechanism works, see #1303.
        """
        filename = os.path.join(self.datapath, "CMTSOLUTION")
        with open(filename, "rb") as fh:
            data = fh.read()

        cat = obspy.read_events(filename)
        cat[0].preferred_focal_mechanism_id = None

        with NamedTemporaryFile() as tf:
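            # the with block is entered only to obtain a unique temporary
            # path; cat.write() below recreates the file and the finally
            # block removes it again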
            temp_filename = tf.name

        try:
            cat.write(temp_filename, format="CMTSOLUTION")
            with open(temp_filename, "rb") as fh:
                new_data = fh.read()
        finally:
            try:
                os.remove(temp_filename)
            except OSError:
                pass

        self.assertEqual(data.decode().splitlines(),
                         new_data.decode().splitlines())
Example #7
    def test_read_and_write_cmtsolution_from_files(self):
        """
        Tests that reading and writing a CMTSOLUTION file does not change
        anything.
        """
        filename = os.path.join(self.datapath, "CMTSOLUTION")
        with open(filename, "rb") as fh:
            data = fh.read()

        cat = obspy.read_events(filename)

        with NamedTemporaryFile() as tf:
            temp_filename = tf.name

        try:
            cat.write(temp_filename, format="CMTSOLUTION")
            with open(temp_filename, "rb") as fh:
                new_data = fh.read()
        finally:
            try:
                os.remove(temp_filename)
            except OSError:
                pass

        self.assertEqual(data.decode().splitlines(),
                         new_data.decode().splitlines())
Example #8
def read_inventory(path_or_file_object=None, format=None):
    """
    Function to read inventory files.

    :type path_or_file_object: str or file-like object, optional
    :param path_or_file_object: File name or file-like object. If this
        parameter is omitted, an example :class:`Inventory`
        object will be returned.
    :type format: str, optional
    :param format: Format of the file to read (e.g. ``"STATIONXML"``).

    .. note::

        For handling additional information not covered by the
        StationXML standard and how to output it to StationXML
        see the :ref:`ObsPy Tutorial <stationxml-extra>`.
    """
    if path_or_file_object is None:
        # if no pathname or URL is specified, return the example inventory
        return _create_example_inventory()
    elif isinstance(path_or_file_object, (str, native_str)) and \
            "://" in path_or_file_object:
        # some URL
        # extract extension if any
        suffix = \
            os.path.basename(path_or_file_object).partition('.')[2] or '.tmp'
        with NamedTemporaryFile(suffix=suffix) as fh:
            download_to_file(url=path_or_file_object, filename_or_buffer=fh)
            return read_inventory(fh.name, format=format)
    return _read_from_plugin("inventory", path_or_file_object,
                             format=format)[0]
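A short usage sketch for the function above (the local path and URL are hypothetical):

from obspy import read_inventory

inv = read_inventory()                                     # example inventory
inv = read_inventory("stations.xml", format="STATIONXML")  # local file
inv = read_inventory("https://example.org/stations.xml")   # downloaded first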
Example #9
    def test_can_add_npz_without_pickle(self):
        """
        Ensure npz data can be added to a PPSD without using the pickle
        protocol, or that a helpful error message is raised if allow_pickle
        is required. See #2409.
        """
        def _save_npz_require_pickle(filename, ppsd):
            """Save npz in a way that requires pickle to load."""
            out = {}
            for key in PPSD.NPZ_STORE_KEYS:
                out[key] = getattr(ppsd, key)
            np.savez_compressed(filename, **out)

        ppsd = _internal_get_ppsd()
        # save the PPSD in a way that mimics old versions.
        with NamedTemporaryFile(suffix='.npz') as ntemp:
            temp_path = ntemp.name
            _save_npz_require_pickle(temp_path, ppsd)
            # We should be able to load the files when allowing pickle.
            ppsd.add_npz(temp_path, allow_pickle=True)
            # the rest of the test is only relevant on numpy versions that have
            # allow_pickle kwarg (starting with version 1.10.0), older versions
            # will always allow pickle and thus reading works
            if NUMPY_VERSION < [1, 10]:
                return
            # Without allow_pickle, a helpful error message should be raised.
            with self.assertRaises(ValueError) as context:
                ppsd.add_npz(temp_path)
            self.assertIn('Loading PPSD results', str(context.exception))
Example #10
    def test_read_and_write_scardec_from_open_files(self):
        """
        Tests that reading and writing a SCARDEC file does not change
        anything.

        This time it tests reading from and writing to open files.
        """
        filename = os.path.join(self.datapath, "test.scardec")
        with open(filename, "rb") as fh:
            data = fh.read()
            fh.seek(0, 0)
            cat = obspy.read_events(fh)

        with NamedTemporaryFile() as tf:
            cat.write(tf, format="SCARDEC")
            tf.seek(0, 0)
            new_data = tf.read()

        # Test file header
        self.assertEqual(data.decode().splitlines()[0:2],
                         new_data.decode().splitlines()[0:2])

        for line_data, line_new in zip(data.decode().splitlines()[2:],
                                       new_data.decode().splitlines()[2:]):
            # Compare time stamps
            self.assertTrue(
                np.allclose(float(line_data.split()[0]),
                            float(line_new.split()[0])))
            # Compare moment rate values
            self.assertTrue(
                np.allclose(float(line_data.split()[1]),
                            float(line_new.split()[1])))
Example #11
 def test_evalresp_specific_frequencies(self):
     """
     Test getting response for specific frequencies from evalresp
     """
     resp = os.path.join(self.path, 'RESP.CH._.HHZ.gz')
     # test some frequencies (results taken from routine
     # test_evalresp_bug_395)
     freqs = [
         0.0, 0.0021303792075, 0.21303792075, 0.63911376225, 2.1303792075,
         21.303792075, 59.9978696208, 60.0
     ]
     expected = [
         0j,
         -38033660.9731 + 14722854.5862j,
         623756964.698 + 34705336.5587j,
         625815840.91 + 11748438.5949j,
         634173301.327 - 2261888.45356j,
         689435074.739 - 216615642.231j,
         -105.682658137 - 4360.67242023j,
         -101.693155157 - 4172.61059939j,
     ]
     with NamedTemporaryFile() as fh:
         tmpfile = fh.name
         with gzip.open(resp) as f:
             fh.write(f.read())
         samprate = 120.0
         t = UTCDateTime(2012, 9, 4, 5, 12, 15, 863300)
         h = evalresp_for_frequencies(1.0 / samprate,
                                      freqs,
                                      tmpfile,
                                      t,
                                      units='VEL')
     np.testing.assert_allclose(h, expected)
Example #12
    def test_saving_directly_to_file(self):
        # Save to a filename.
        with NamedTemporaryFile() as tf:
            filename = tf.name
            st = self.c.get_waveforms(model="test",
                                      network="IU",
                                      station="ANMO",
                                      eventid="GCMT:C201002270634A",
                                      starttime="P-10",
                                      endtime="P+10",
                                      components="Z",
                                      filename=tf)
            # No return value.
            self.assertTrue(st is None)

            st = obspy.read(filename)
            self.assertEqual(len(st), 1)

        # Save to an open file-like object.
        with io.BytesIO() as buf:
            st = self.c.get_waveforms(model="test",
                                      network="IU",
                                      station="ANMO",
                                      eventid="GCMT:C201002270634A",
                                      starttime="P-10",
                                      endtime="P+10",
                                      components="Z",
                                      filename=buf)
            # No return value.
            self.assertTrue(st is None)

            buf.seek(0, 0)
            st = obspy.read(buf)
            self.assertEqual(len(st), 1)
Example #13
 def test_issue_193(self):
     """
      Test for issue #193: check that a non-contiguous array is written
      correctly.
     """
     warnings.filterwarnings("ignore", "Detected non contiguous data")
     # test all plugins with both read and write method
     formats_write = \
         set(_get_default_eps('obspy.plugin.waveform', 'writeFormat'))
     formats_read = \
         set(_get_default_eps('obspy.plugin.waveform', 'readFormat'))
     formats = set.intersection(formats_write, formats_read)
     # mseed will raise exception for int64 data, thus use int32 only
     data = np.arange(10, dtype=np.int32)
     # make array non-contiguous
     data = data[::2]
     tr = Trace(data=data)
     for format in formats:
          # XXX: skip SEGY, SU and SEG2 formats for now as they need some
          # special headers.
         if format in ['SEGY', 'SU', 'SEG2']:
             continue
         with NamedTemporaryFile() as tf:
             tempfile = tf.name
             tr.write(tempfile, format)
             if format == "Q":
                 tempfile = tempfile + ".QHD"
             tr_test = read(tempfile, format)[0]
             if format == 'Q':
                 os.remove(tempfile[:-4] + '.QBN')
                 os.remove(tempfile[:-4] + '.QHD')
         np.testing.assert_array_equal(tr.data, tr_test.data)
Example #14
 def test_writing_invalid_quakeml_id(self):
     """
      Some ids might be invalid. We still want to write them so as not to
      mess with any external tools relying on the ids, but we also raise a
      warning of course.
     """
     filename = os.path.join(self.path, 'invalid_id.xml')
     cat = read_events(filename)
     self.assertEqual(
         cat[0].resource_id.id,
         "smi:org.gfz-potsdam.de/geofon/RMHP(60)>>ITAPER(3)>>BW(4,5,15)")
     with NamedTemporaryFile() as tf:
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter("always")
             cat.write(tf.name, format="quakeml")
             cat2 = read_events(tf.name)
     self.assertEqual(len(w), 19)
     self.assertEqual(
         w[0].message.args[0],
         "'smi:org.gfz-potsdam.de/geofon/RMHP(60)>>ITAPER(3)>>BW(4,5,15)' "
         "is not a valid QuakeML URI. It will be in the final file but "
         "note that the file will not be a valid QuakeML file.")
     self.assertEqual(
         cat2[0].resource_id.id,
         "smi:org.gfz-potsdam.de/geofon/RMHP(60)>>ITAPER(3)>>BW(4,5,15)")
Example #15
 def wrapped_func(filename, *args, **kwargs):
      if not isinstance(filename, str):
         return func(filename, *args, **kwargs)
     elif not os.path.exists(filename):
         msg = "File not found '%s'" % (filename)
         raise IOError(msg)
     # check if we got a compressed file
     unpacked_data = None
     if filename.endswith('.bz2'):
         # bzip2
         try:
             import bz2
              with open(filename, 'rb') as fp:
                  unpacked_data = bz2.decompress(fp.read())
          except Exception:
              pass
     elif filename.endswith('.gz'):
         # gzip
         try:
             import gzip
              with gzip.open(filename, 'rb') as fp:
                  unpacked_data = fp.read()
          except Exception:
              pass
     if unpacked_data:
         # we unpacked something without errors - create temporary file
         with NamedTemporaryFile() as tempfile:
          tempfile.write(unpacked_data)
             # call wrapped function
             result = func(tempfile.name, *args, **kwargs)
     else:
         # call wrapped function with original filename
         result = func(filename, *args, **kwargs)
     return result
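The wrapped_func above is the inner function of a decompression decorator (ObsPy's uncompress_file follows this shape). A sketch of how such a wrapper is assembled and applied; read_raw is a hypothetical reader:

import functools

def uncompress_file(func):
    """Call func on a transparently decompressed copy of filename."""
    @functools.wraps(func)
    def wrapped_func(filename, *args, **kwargs):
        ...  # body as shown above
    return wrapped_func

@uncompress_file
def read_raw(filename):
    with open(filename, 'rb') as fh:
        return fh.read()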
Example #16
    def test_iris_example_queries_dataselect(self):
        """
        Tests the (sometimes modified) example queries given on the IRIS
        webpage.
        """
        client = self.client

        queries = [
            ("IU", "ANMO", "00", "BHZ", UTCDateTime("2010-02-27T06:30:00.000"),
             UTCDateTime("2010-02-27T06:40:00.000")),
            ("IU", "A*", "*", "BHZ", UTCDateTime("2010-02-27T06:30:00.000"),
             UTCDateTime("2010-02-27T06:31:00.000")),
            ("IU", "A??", "*0", "BHZ", UTCDateTime("2010-02-27T06:30:00.000"),
             UTCDateTime("2010-02-27T06:31:00.000")),
        ]
        result_files = [
            "dataselect_example.mseed",
            "dataselect_example_wildcards.mseed",
            "dataselect_example_mixed_wildcards.mseed",
        ]
        for query, filename in zip(queries, result_files):
            # test output to stream
            got = client.get_waveforms(*query)
            file_ = os.path.join(self.datapath, filename)
            expected = read(file_)
            self.assertEqual(got, expected,
                             "Dataselect failed for query %s" % repr(query))
            # test output to file
            with NamedTemporaryFile() as tf:
                client.get_waveforms(*query, filename=tf.name)
                with open(tf.name, 'rb') as fh:
                    got = fh.read()
                with open(file_, 'rb') as fh:
                    expected = fh.read()
            self.assertEqual(got, expected,
                             "Dataselect failed for query %s" % repr(query))
Example #17
 def test_write_with_extra_tags_namespace_redef(self):
     """
      Tests that exceptions are raised when namespaces
      are redefined.
     """
     filename = os.path.join(self.data_dir,
                             "stationxml_with_availability.xml")
     # read the StationXML with availability
     inv = obspy.read_inventory(filename)
     with NamedTemporaryFile() as tf:
         # manually add custom namespace definition
         tmpfile = tf.name
         # assert that namespace prefix of xsi raises ValueError
         mynsmap = {'xsi': 'http://bad.custom.ns/'}
         self.assertRaises(ValueError,
                           inv.write,
                           path_or_file_object=tmpfile,
                           format="STATIONXML",
                           nsmap=mynsmap)
         # assert that namespace prefix of None raises ValueError
         mynsmap = {None: 'http://bad.custom.ns/'}
         self.assertRaises(ValueError,
                           inv.write,
                           path_or_file_object=tmpfile,
                           format="STATIONXML",
                           nsmap=mynsmap)
Example #18
    def test_can_read_npz_without_pickle(self, state):
        """
        Ensures that a default PPSD can be written and read without allowing
        np.load to use pickle, or that a helpful error message is raised if
        allow_pickle is required. See #2409.
        """
        # Init a test PPSD and empty byte stream.
        ppsd = PPSD.load_npz(state.example_ppsd_npz, allow_pickle=True)
        byte_me = io.BytesIO()
        # Save PPSD to byte stream and rewind to 0.
        ppsd.save_npz(byte_me)
        byte_me.seek(0)
        # Load dict, will raise an exception if pickle is needed.
        loaded_dict = dict(np.load(byte_me, allow_pickle=False))
        assert isinstance(loaded_dict, dict)
        # A helpful error message is issued when allow_pickle is needed.
        with pytest.raises(ValueError, match='Loading PPSD results'):
            PPSD.load_npz(state.example_ppsd_npz)

        ppsd = _internal_get_ppsd()
        # save the PPSD in a way that mimics old versions.
        with NamedTemporaryFile(suffix='.npz') as ntemp:
            temp_path = ntemp.name
            self._save_npz_require_pickle(temp_path, ppsd)
            # We should be able to load the files when allowing pickle.
            PPSD.load_npz(temp_path, allow_pickle=True)
            # Without allow_pickle, a helpful error message should be raised.
            with pytest.raises(ValueError, match='Loading PPSD results'):
                PPSD.load_npz(temp_path)
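The helper self._save_npz_require_pickle is not shown in this snippet; Example #9 defines an equivalent standalone version, which as a method would look roughly like this (assuming PPSD.NPZ_STORE_KEYS lists the attributes persisted by save_npz):

    def _save_npz_require_pickle(self, filename, ppsd):
        """Save npz in a way that requires pickle to load."""
        out = {key: getattr(ppsd, key) for key in PPSD.NPZ_STORE_KEYS}
        np.savez_compressed(filename, **out)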
Example #19
    def test_read_and_write(self):
        filename = os.path.join(self.path, 'qml-example-1.2-RC3_write.sc3ml')
        catalog = read_events(filename)

        with NamedTemporaryFile() as tf:
            catalog.write(tf, format='SC3ML', validate=True)
            self.assertTrue(filecmp.cmp(filename, tf.name))
Example #20
    def cmp_read_xslt_file(self, sc3ml_file, quakeml_file, validate=True):
        """
        Check that the QuakeML file generated with the XSLT file is the
        same as the one in the data folder. All available SC3ML
        versions are tested except those for which the file is not
        valid.
        """
        for version in SCHEMA_VERSION:
            read_xslt_filename = os.path.join(
                self.io_directory, 'seiscomp', 'data',
                'sc3ml_%s__quakeml_1.2.xsl' % version,
            )

            transform = etree.XSLT(etree.parse(read_xslt_filename))
            filename = os.path.join(self.path, sc3ml_file)
            sc3ml_doc = self.change_version(filename, version)

            # Only test valid SC3ML file
            if not validate_sc3ml(sc3ml_doc):
                continue

            quakeml_doc = transform(sc3ml_doc)

            with NamedTemporaryFile() as tf:
                tf.write(quakeml_doc)
                if validate:
                    self.assertTrue(_validate_quakeml(tf.name))
                filepath_cmp = os.path.join(self.path, quakeml_file)
                self.assertTrue(filecmp.cmp(filepath_cmp, tf.name))
Example #21
 def test_large_negative_longitude(self):
     event = full_test_event()
     event.origins[0].longitude = -120
     with NamedTemporaryFile(suffix=".out") as tf:
         event.write(tf.name, format="NORDIC")
         event_back = read_events(tf.name)
         _assert_similarity(event, event_back[0])
Example #22
    def test_IRIS_example_queries_station(self):
        """
        Tests the (sometimes modified) example queries given on the IRIS
        webpage.
        """
        client = self.client

        queries = [
            dict(latitude=-56.1, longitude=-26.7, maxradius=15),
            dict(startafter=UTCDateTime("2003-01-07"),
                 endbefore=UTCDateTime("2011-02-07"),
                 minlatitude=15,
                 maxlatitude=55,
                 minlongitude=170,
                 maxlongitude=-170),
            dict(starttime=UTCDateTime("2013-01-01"),
                 network="IU",
                 sta="ANMO",
                 level="channel"),
            dict(starttime=UTCDateTime("2013-01-01"),
                 network="IU",
                 sta="A*",
                 location="00",
                 level="channel",
                 format="text"),
        ]
        result_files = [
            "stations_by_latlon.xml",
            "stations_by_misc.xml",
            "stations_by_station.xml",
            "stations_by_station_wildcard.xml",
        ]
        for query, filename in zip(queries, result_files):
            got = client.get_stations(**query)
            file_ = os.path.join(self.datapath, filename)
            # with open(file_, "wt") as fh:
            #    fh.write(got)
            expected = read_inventory(file_, format="STATIONXML")
            # delete both creation times and modules before comparing objects.
            got.created = None
            expected.created = None
            got.module = None
            expected.module = None

            # XXX Py3k: the objects differ in direct comparison; however,
            # their string representations are equal
            self.assertEqual(str(got), str(expected), failmsg(got, expected))

            # test output to file
            with NamedTemporaryFile() as tf:
                client.get_stations(filename=tf.name, **query)
                with open(tf.name, 'rb') as fh:
                    got = fh.read()
                with open(file_, 'rb') as fh:
                    expected = fh.read()
            ignore_lines = [
                b'<Created>', b'<TotalNumberStations>', b'<Module>',
                b'<ModuleURI>'
            ]
            msg = failmsg(got, expected, ignore_lines=ignore_lines)
            self.assertEqual(msg, "", msg)
Example #23
    def test_ppsd(self):
        """
        Test PPSD routine with some real data.
        """
        # paths of the expected result data
        file_histogram = os.path.join(
            self.path,
            'BW.KW1._.EHZ.D.2011.090_downsampled__ppsd_hist_stack.npy')
        file_binning = os.path.join(
            self.path, 'BW.KW1._.EHZ.D.2011.090_downsampled__ppsd_mixed.npz')
        file_mode_mean = os.path.join(
            self.path,
            'BW.KW1._.EHZ.D.2011.090_downsampled__ppsd_mode_mean.npz')
        tr, _paz = _get_sample_data()
        st = Stream([tr])
        ppsd = _get_ppsd()
        # read results and compare
        result_hist = np.load(file_histogram)
        self.assertEqual(len(ppsd.times_processed), 4)
        self.assertEqual(ppsd.nfft, 65536)
        self.assertEqual(ppsd.nlap, 49152)
        np.testing.assert_array_equal(ppsd.current_histogram, result_hist)
        # add the same data a second time (which should do nothing at all) and
        # test again - but it will raise UserWarnings, which we ignore here
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('ignore', UserWarning)
            ppsd.add(st)
            np.testing.assert_array_equal(ppsd.current_histogram, result_hist)
        # test the binning arrays
        binning = np.load(file_binning)
        np.testing.assert_array_equal(ppsd.db_bin_edges, binning['spec_bins'])
        np.testing.assert_array_equal(ppsd.period_bin_centers,
                                      binning['period_bins'])

        # test the mode/mean getter functions
        per_mode, mode = ppsd.get_mode()
        per_mean, mean = ppsd.get_mean()
        result_mode_mean = np.load(file_mode_mean)
        np.testing.assert_array_equal(per_mode, result_mode_mean['per_mode'])
        np.testing.assert_array_equal(mode, result_mode_mean['mode'])
        np.testing.assert_array_equal(per_mean, result_mode_mean['per_mean'])
        np.testing.assert_array_equal(mean, result_mode_mean['mean'])

        # test saving and loading of the PPSD (using a temporary file)
        with NamedTemporaryFile(suffix=".npz") as tf:
            filename = tf.name
            # test saving and loading to npz
            ppsd.save_npz(filename)
            ppsd_loaded = PPSD.load_npz(filename)
            ppsd_loaded.calculate_histogram()
            self.assertEqual(len(ppsd_loaded.times_processed), 4)
            self.assertEqual(ppsd_loaded.nfft, 65536)
            self.assertEqual(ppsd_loaded.nlap, 49152)
            np.testing.assert_array_equal(ppsd_loaded.current_histogram,
                                          result_hist)
            np.testing.assert_array_equal(ppsd_loaded.db_bin_edges,
                                          binning['spec_bins'])
            np.testing.assert_array_equal(ppsd_loaded.period_bin_centers,
                                          binning['period_bins'])
Example #24
    def test_fail_writing(self):
        """
        Test deliberate failure cases.
        """
        test_event = full_test_event()
        # Add the event to a catalogue which can be used for QuakeML testing
        test_cat = Catalog()
        test_cat += test_event
        test_ev = test_cat[0]
        test_cat.append(full_test_event())
        with self.assertRaises(NordicParsingError):
            # Raises error due to multiple events in catalog
            _write_nordic(test_cat, filename=None, userid='TEST',
                          evtype='L', outdir='.',
                          wavefiles='test', explosion=True,
                          overwrite=True)
        with self.assertRaises(NordicParsingError):
            # Raises error due to too long userid
            _write_nordic(test_ev, filename=None, userid='TESTICLE',
                          evtype='L', outdir='.',
                          wavefiles='test', explosion=True,
                          overwrite=True)
        with self.assertRaises(NordicParsingError):
            # Raises error due to unrecognised event type
            _write_nordic(test_ev, filename=None, userid='TEST',
                          evtype='U', outdir='.',
                          wavefiles='test', explosion=True,
                          overwrite=True)
        with self.assertRaises(NordicParsingError):
            # Raises error due to no output directory
            _write_nordic(test_ev, filename=None, userid='TEST',
                          evtype='L', outdir='albatross',
                          wavefiles='test', explosion=True,
                          overwrite=True)
        invalid_origin = test_ev.copy()
        invalid_origin.origins = []
        with self.assertRaises(NordicParsingError):
            _write_nordic(invalid_origin, filename=None, userid='TEST',
                          evtype='L', outdir='.',
                          wavefiles='test', explosion=True,
                          overwrite=True)
        invalid_origin = test_ev.copy()
        invalid_origin.origins[0].time = None
        with self.assertRaises(NordicParsingError):
            _write_nordic(invalid_origin, filename=None, userid='TEST',
                          evtype='L', outdir='.',
                          wavefiles='test', explosion=True,
                          overwrite=True)
        # Write a near empty origin
        valid_origin = test_ev.copy()
        valid_origin.origins[0].latitude = None
        valid_origin.origins[0].longitude = None
        valid_origin.origins[0].depth = None
        with NamedTemporaryFile() as tf:
            _write_nordic(valid_origin, filename=tf.name, userid='TEST',
                          evtype='L', outdir='.', wavefiles='test',
                          explosion=True, overwrite=True)
            self.assertTrue(os.path.isfile(tf.name))
Example #25
 def wrapped_func(filename, *args, **kwargs):
     if not isinstance(filename, (str, native_str)):
         return func(filename, *args, **kwargs)
     elif not os.path.exists(filename):
         msg = "File not found '%s'" % (filename)
         raise IOError(msg)
     # check if we got a compressed file or archive
     obj_list = []
     if tarfile.is_tarfile(filename):
         try:
             # reading with transparent compression
             with tarfile.open(filename, 'r|*') as tar:
                 for tarinfo in tar:
                     # only handle regular files
                     if not tarinfo.isfile():
                         continue
                     data = tar.extractfile(tarinfo).read()
                     obj_list.append(data)
          except Exception:
             pass
     elif zipfile.is_zipfile(filename):
         try:
              with zipfile.ZipFile(filename) as zf:
                  obj_list = [zf.read(name) for name in zf.namelist()]
          except Exception:
              pass
     elif filename.endswith('.bz2'):
         # bz2 module
         try:
             import bz2
             with open(filename, 'rb') as fp:
                 obj_list.append(bz2.decompress(fp.read()))
          except Exception:
             pass
     elif filename.endswith('.gz'):
         # gzip module
         try:
             import gzip
             with gzip.open(filename, 'rb') as fp:
                 obj_list.append(fp.read())
          except Exception:
             pass
     # handle results
     if obj_list:
         # write results to temporary files
         result = None
         for obj in obj_list:
             with NamedTemporaryFile() as tempfile:
                  tempfile.write(obj)
                 stream = func(tempfile.name, *args, **kwargs)
                 # just add other stream objects to first stream
                 if result is None:
                     result = stream
                 else:
                     result += stream
     else:
          # no compression detected
         result = func(filename, *args, **kwargs)
     return result
Example #26
 def test_write_select(self):
     cat = read_events()
     with NamedTemporaryFile(suffix='.out') as tf:
         write_select(cat, filename=tf.name)
         cat_back = read_events(tf.name)
         for event_1, event_2 in zip(cat, cat_back):
             self.assertTrue(test_similarity(event_1=event_1,
                                             event_2=event_2))
Example #27
    def test_write_example(self):
        filename = os.path.join(self.path, 'qml-example-1.2-RC3.xml')
        catalog = _read_quakeml(filename)

        with NamedTemporaryFile() as tf:
            catalog.write(tf, format='SC3ML', validate=True)
            filepath_cmp = \
                os.path.join(self.path, 'qml-example-1.2-RC3_write.sc3ml')
            self.assertTrue(filecmp.cmp(filepath_cmp, tf.name))
Example #28
 def test_write_plugin(self):
     cat = read_events()
     cat.append(full_test_event())
     with NamedTemporaryFile(suffix='.out') as tf:
         cat.write(tf.name, format='nordic')
         cat_back = read_events(tf.name)
         for event_1, event_2 in zip(cat, cat_back):
             self.assertTrue(test_similarity(event_1=event_1,
                                             event_2=event_2))
Example #29
 def test_raise_on_unknown_format(self):
     """
     Test case for issue #338:
     """
     with NamedTemporaryFile() as tf:
         tmpfile = tf.name
         # create empty file
         open(tmpfile, 'wb').close()
          # reading an empty file of unknown format should raise TypeError
         self.assertRaises(TypeError, read, tmpfile)
Example #30
 def test_write_select(self):
     cat = read_events()
     with NamedTemporaryFile(suffix='.out') as tf:
         # raises "UserWarning: mb is not convertible"
         with warnings.catch_warnings():
             warnings.simplefilter('ignore', UserWarning)
             write_select(cat, filename=tf.name)
         cat_back = read_events(tf.name)
         for event_1, event_2 in zip(cat, cat_back):
             _assert_similarity(event_1=event_1, event_2=event_2)