Example #1
def make_diverse_catalog_list(*args):  # NOQA
    """
    Make a list of diverse catalogs.

    Creates several copies of the diverse catalog, which is returned from
    :func:`~obspy.core.util.testing.create_diverse_catalog`. Copies are
    created with the copy method and by reading a QuakeML representation
    from a byte string.

    The unused args are necessary for thread-pool mapping.
    """
    # create a complex catalog
    cat1 = create_diverse_catalog()
    bytes_io = io.BytesIO()
    cat1.write(bytes_io, 'quakeml')
    # get a few copies from reading from bytes
    cat2 = read_events(bytes_io)
    cat3 = read_events(bytes_io)
    # make more catalogs with copy method
    cat4 = cat1.copy()
    cat5 = cat4.copy()
    # ensure creating, copying, and deleting doesn't mess up id tracking
    cat_to_delete = cat2.copy()
    del cat_to_delete
    # pickle and unpickle catalog
    cat_bytes = pickle.dumps(cat4)
    cat6 = pickle.loads(cat_bytes)
    return [cat1, cat2, cat3, cat4, cat5, cat6]
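The helper above round-trips a Catalog through an in-memory QuakeML document. A minimal, self-contained sketch of that pattern (the explicit seek(0) before re-reading is an extra precaution, not taken from the original helper):

import io
from obspy import read_events

cat = read_events()                  # ObsPy's bundled example catalog
buf = io.BytesIO()
cat.write(buf, format="QUAKEML")     # serialize to an in-memory QuakeML document
buf.seek(0)                          # rewind before re-reading
cat_copy = read_events(buf)
assert len(cat_copy) == len(cat)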
Example #2
    def test_catalog_resource_ids(self):
        """
        Basic tests on the catalog resource ids.
        """
        cat1 = read_events()
        # The resource_id attached to the first event is self-pointing
        self.assertIs(cat1[0], cat1[0].resource_id.get_referred_object())
        # make a copy and re-read catalog
        cat2 = cat1.copy()
        cat3 = read_events()
        # the resource_id on the new catalogs point to attached objects
        self.assertIs(cat1[0], cat1[0].resource_id.get_referred_object())
        self.assertIs(cat2[0], cat2[0].resource_id.get_referred_object())
        self.assertIs(cat3[0], cat3[0].resource_id.get_referred_object())
        # now delete cat1 and make sure cat2 and cat3 still work
        del cat1
        self.assertIs(cat2[0], cat2[0].resource_id.get_referred_object())
        self.assertIs(cat3[0], cat3[0].resource_id.get_referred_object())
        # create a resource_id with the same id; the last defined object
        # with that resource id (that is still in scope) should be returned
        new_id = cat2[0].resource_id.id
        rid = ResourceIdentifier(new_id)

        self.assertIs(rid.get_referred_object(), cat3[0])
        del cat3

        gc.collect()  # Call gc to ensure WeakValueDict works
        # raises UserWarning, suppress to keep std out cleaner
        with WarningsCapture():
            self.assertIs(rid.get_referred_object(), cat2[0])
            del cat2
            self.assertIs(rid.get_referred_object(), None)
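A stripped-down sketch of the ResourceIdentifier behaviour exercised above: a new identifier with a known id resolves to the most recently bound object, and the binding is weak, so it can disappear once every referring catalog is gone (all names below are local to this sketch):

import gc
from obspy import read_events
from obspy.core.event import ResourceIdentifier

cat = read_events()                          # bundled example catalog
first_event = cat[0]
rid = ResourceIdentifier(first_event.resource_id.id)
assert rid.get_referred_object() is first_event
del cat, first_event                         # drop all strong references
gc.collect()                                 # make sure the weak binding is cleared
# may emit a UserWarning and return None, since the referred object is gone
print(rid.get_referred_object())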
Example #3
 def test_seconds_overflow(self):
     """
     #2348 indicates that SEISAN sometimes overflows seconds into column 29.
     """
     with warnings.catch_warnings():
         warnings.simplefilter('ignore', UserWarning)
         cat = read_events(
             os.path.join(self.testing_path, "sfile_seconds_overflow"))
     event = cat[0]
     pick_times = {
         "LSb2": UTCDateTime(2009, 7, 2, 6, 49) + 100.24}
     for key, value in pick_times.items():
         pick = [p for p in event.picks
                 if p.waveform_id.station_code == key]
         self.assertEqual(len(pick), 1)
         self.assertEqual(pick[0].time, value)
     with NamedTemporaryFile(suffix=".out") as tf:
         write_select(cat, filename=tf.name)
         with warnings.catch_warnings():
             warnings.simplefilter('ignore', UserWarning)
             cat_back = read_events(tf.name)
     self.assertEqual(len(cat_back), 1)
     for key, value in pick_times.items():
         pick = [p for p in cat_back[0].picks
                 if p.waveform_id.station_code == key]
         self.assertEqual(len(pick), 1)
         self.assertEqual(pick[0].time, value)
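The expected pick time above relies on UTCDateTime arithmetic rolling excess seconds over into minutes; a one-line sketch of that behaviour:

from obspy import UTCDateTime

# 06:49:00 plus 100.24 s rolls over into the next minute
print(UTCDateTime(2009, 7, 2, 6, 49) + 100.24)   # 2009-07-02T06:50:40.240000Z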
Example #4
    def test_read_invalid_filename(self):
        """
        Tests that we get a sane error message when calling read_events()
        with a filename that doesn't exist
        """
        doesnt_exist = 'dsfhjkfs'
        for i in range(10):
            if os.path.exists(doesnt_exist):
                doesnt_exist += doesnt_exist
                continue
            break
        else:
            self.fail('unable to get invalid file path')
        doesnt_exist = native_str(doesnt_exist)

        if PY2:
            exception_type = getattr(builtins, 'IOError')
        else:
            exception_type = getattr(builtins, 'FileNotFoundError')
        exception_msg = "[Errno 2] No such file or directory: '{}'"

        formats = _get_entry_points(
            'obspy.plugin.catalog', 'readFormat').keys()
        # try read_events() with an invalid filename for all registered read
        # plugins and also for file-type autodiscovery
        formats = [None] + list(formats)
        for format in formats:
            with self.assertRaises(exception_type) as e:
                read_events(doesnt_exist, format=format)
            self.assertEqual(
                str(e.exception), exception_msg.format(doesnt_exist))
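For reference, a minimal sketch of the error asserted above when read_events() is given a path that does not exist (FileNotFoundError on Python 3, IOError on Python 2); the filename below is an arbitrary placeholder:

from obspy import read_events

try:
    read_events("no_such_file.xml")     # arbitrary missing path
except FileNotFoundError as err:        # on Python 2 this would be an IOError
    print(err)    # [Errno 2] No such file or directory: 'no_such_file.xml'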
Example #5
 def test_write_select(self):
     cat = read_events()
     with NamedTemporaryFile(suffix='.out') as tf:
         write_select(cat, filename=tf.name)
         cat_back = read_events(tf.name)
         for event_1, event_2 in zip(cat, cat_back):
             self.assertTrue(test_similarity(event_1=event_1,
                                             event_2=event_2))
Example #6
    def test_write_catalog_shapefile_with_extra_field(self):
        """
        Tests writing a catalog with an additional custom database column
        """
        cat = read_events('/path/to/mchedr.dat')
        cat += read_events('/path/to/nlloc.qml')
        extra_fields = [('Region', 'C', 50, None,
                        ['SOUTHEAST OF HONSHU, JAPAN', 'GERMANY'])]
        bad_extra_fields_wrong_length = [('Region', 'C', 50, None, ['ABC'])]
        bad_extra_fields_name_clash = [('Magnitude', 'C', 50, None, ['ABC'])]

        with TemporaryWorkingDirectory():
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings('always')
                # test some bad calls that should raise an Exception
                with self.assertRaises(ValueError) as cm:
                    _write_shapefile(
                        cat, "catalog.shp",
                        extra_fields=bad_extra_fields_wrong_length)
                self.assertEqual(
                    str(cm.exception), "list of values for each item in "
                    "'extra_fields' must have same length as Catalog object")
                with self.assertRaises(ValueError) as cm:
                    _write_shapefile(
                        cat, "catalog.shp",
                        extra_fields=bad_extra_fields_name_clash)
                self.assertEqual(
                    str(cm.exception), "Conflict with existing field named "
                    "'Magnitude'.")
                # now test a good call that should work
                _write_shapefile(cat, "catalog.shp", extra_fields=extra_fields)
            for w_ in w:
                try:
                    self.assertEqual(
                        str(w_.message),
                        'Encountered an event with origin uncertainty '
                        'description of type "confidence ellipsoid". This is '
                        'not yet implemented for output as shapefile. No '
                        'origin uncertainty will be added to shapefile for '
                        'such events.')
                except AssertionError:
                    continue
                break
            else:
                raise
            for suffix in SHAPEFILE_SUFFIXES:
                self.assertTrue(os.path.isfile("catalog" + suffix))
            with open("catalog.shp", "rb") as fh_shp, \
                    open("catalog.dbf", "rb") as fh_dbf, \
                    open("catalog.shx", "rb") as fh_shx:
                shp = shapefile.Reader(shp=fh_shp, shx=fh_shx, dbf=fh_dbf)
                # check contents of shapefile that we just wrote
                _assert_records_and_fields(
                    got_fields=shp.fields, got_records=shp.records(),
                    expected_fields=expected_catalog_fields_with_region,
                    expected_records=expected_catalog_records_with_region)
                self.assertEqual(shp.shapeType, shapefile.POINT)
                _close_shapefile_reader(shp)
Example #7
 def test_write_plugin(self):
     cat = read_events()
     cat.append(full_test_event())
     with NamedTemporaryFile(suffix='.out') as tf:
         cat.write(tf.name, format='nordic')
         cat_back = read_events(tf.name)
         for event_1, event_2 in zip(cat, cat_back):
             self.assertTrue(test_similarity(event_1=event_1,
                                             event_2=event_2))
Example #8
 def test_read_events_with_wildcard(self):
     """
     Tests the read_events() function with a filename wild card.
     """
     # without wildcard..
     expected = read_events(self.iris_xml)
     expected += read_events(self.neries_xml)
     # with wildcard
     got = read_events(os.path.join(self.path, "*_events.xml"))
     self.assertEqual(expected, got)
Example #9
 def test_write_select(self):
     cat = read_events()
     with NamedTemporaryFile(suffix='.out') as tf:
         # raises "UserWarning: mb is not convertible"
         with warnings.catch_warnings():
             warnings.simplefilter('ignore', UserWarning)
             write_select(cat, filename=tf.name)
         cat_back = read_events(tf.name)
         for event_1, event_2 in zip(cat, cat_back):
             _assert_similarity(event_1=event_1, event_2=event_2)
Example #10
    def test_reading_bytes_io(self):
        filename = os.path.join(self.testing_path, '01-0411-15L.S201309')
        with open(filename, "rb") as fh:
            file_object = io.BytesIO(fh.read())

        cat = read_events(file_object)
        file_object.close()

        ref_cat = read_events(filename)
        self.assertTrue(test_similarity(cat[0], ref_cat[0]))
Example #11
def test_check_events_consistent():
    cat1 = read_events(EVENTFILE)[0]
    cat2 = read_events(EVENTFILE)[0]

    events = {"file1": cat1, "file2": cat2}
    sa.check_events_consistent(events)

    cat2.event_descriptions = []
    with pytest.raises(ValueError):
        sa.check_events_consistent(events)
Example #12
    def test_read_quakeml(self):
        """
        Test that reading a QuakeML file with format='SC3ML' raises an error.
        """
        filename = os.path.join(self.path, 'qml-example-1.2-RC3.xml')
        with self.assertRaises(ValueError) as e:
            read_events(filename, format='SC3ML')

        expected_message = "Not a SC3ML compatible file or string."
        self.assertEqual(e.exception.args[0], expected_message)
Example #13
    def test_reading_using_obspy_plugin(self):
        """
        Checks that reading with the read_events() function works correctly.
        """
        filename = os.path.join(self.datapath, "C200604092050A.ndk")
        cat = read_events(filename)

        reference = os.path.join(self.datapath, "C200604092050A.xml")
        ref_cat = read_events(reference)

        self.assertEqual(cat, ref_cat)
Example #14
    def test_reading_from_open_file_in_binary_mode(self):
        """
        Tests reading from an open file in binary mode.
        """
        filename = os.path.join(self.datapath, "C200604092050A.ndk")
        with open(filename, "rb") as fh:
            cat = read_events(fh)

        reference = os.path.join(self.datapath, "C200604092050A.xml")
        ref_cat = read_events(reference)

        self.assertEqual(cat, ref_cat)
Example #15
 def test_read_nlloc_with_picks(self):
     """
     Test correct resource ID linking when reading NLLOC_HYP file with
     providing original picks.
     """
     picks = read_events(get_example_file("nlloc_custom.qml"))[0].picks
     arrivals = read_events(
         get_example_file("nlloc_custom.hyp"), format="NLLOC_HYP",
         picks=picks)[0].origins[0].arrivals
     expected = [p.resource_id for p in picks]
     got = [a.pick_id for a in arrivals]
     self.assertEqual(expected, got)
Example #16
 def test_write_plugin(self):
     cat = read_events()
     cat.append(full_test_event())
     with NamedTemporaryFile(suffix='.out') as tf:
         # raises UserWarning: mb is not convertible
         with warnings.catch_warnings():
             warnings.simplefilter('ignore', UserWarning)
             cat.write(tf.name, format='nordic')
         # raises "UserWarning: AIN in header, currently unsupported"
         with warnings.catch_warnings():
             warnings.simplefilter('ignore', UserWarning)
             cat_back = read_events(tf.name)
         for event_1, event_2 in zip(cat, cat_back):
             _assert_similarity(event_1=event_1, event_2=event_2)
Example #17
    def test_reading_bytes_io(self):
        filename = os.path.join(self.testing_path, '01-0411-15L.S201309')
        with open(filename, "rb") as fh:
            file_object = io.BytesIO(fh.read())

        # raises "UserWarning: AIN in header, currently unsupported"
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)

            cat = read_events(file_object)
            file_object.close()

            ref_cat = read_events(filename)
            _assert_similarity(cat[0], ref_cat[0])
Example #18
def eqview(request):
	try:
		localcat = read_events('nezsite/media/seismic/events/evtlocal30days.xml', 'QUAKEML')
		globalcat = read_events('nezsite/media/seismic/events/evtmajor30days.xml', 'QUAKEML')
	except:
		localcat, globalcat = Catalog(), Catalog()

	context = {
		'localcat': localcat,
		'globalcat': globalcat,
		'time': timezone.now(),
		'page': 'Recent Earthquakes',
	}
	return render(request, 'shake/eqview.html', context)
Example #19
    def test_reading_from_bytes_io(self):
        """
        Tests reading from BytesIO.
        """
        filename = os.path.join(self.datapath, "C200604092050A.ndk")
        with open(filename, "rb") as fh:
            file_object = io.BytesIO(fh.read())

        cat = read_events(file_object)
        file_object.close()

        reference = os.path.join(self.datapath, "C200604092050A.xml")
        ref_cat = read_events(reference)

        self.assertEqual(cat, ref_cat)
Example #20
 def test_read_events(self):
     """
     Tests the read_events() function using entry points.
     """
     # iris
     catalog = read_events(self.iris_xml)
     self.assertEqual(len(catalog), 2)
     self.assertEqual(catalog[0]._format, 'QUAKEML')
     self.assertEqual(catalog[1]._format, 'QUAKEML')
     # neries
     catalog = read_events(self.neries_xml)
     self.assertEqual(len(catalog), 3)
     self.assertEqual(catalog[0]._format, 'QUAKEML')
     self.assertEqual(catalog[1]._format, 'QUAKEML')
     self.assertEqual(catalog[2]._format, 'QUAKEML')
Example #21
    def test_write_no_preferred_focal_mechanism(self):
        """
        Tests that writing a CMTSOLUTION file with no preferred (but at least
        one) focal mechanism works, see #1303.
        """
        filename = os.path.join(self.datapath, "CMTSOLUTION")
        with open(filename, "rb") as fh:
            data = fh.read()

        cat = obspy.read_events(filename)
        cat[0].preferred_focal_mechanism_id = None

        with NamedTemporaryFile() as tf:
            temp_filename = tf.name

        try:
            cat.write(temp_filename, format="CMTSOLUTION")
            with open(temp_filename, "rb") as fh:
                new_data = fh.read()
        finally:
            try:
                os.remove(temp_filename)
            except:
                pass

        self.assertEqual(data.decode().splitlines(),
                         new_data.decode().splitlines())
Example #22
    def test_write_nlloc_obs(self):
        """
        Test writing nonlinloc observations phase file.
        """
        # load nlloc_custom.qml QuakeML file to generate OBS file from it
        filename = get_example_file("nlloc_custom.qml")
        cat = read_events(filename, "QUAKEML")
        # adjust one pick time that got cropped by nonlinloc in NLLOC HYP file
        # due to less precision in hypocenter file (that we used to create the
        # reference QuakeML file)
        for pick in cat[0].picks:
            if pick.waveform_id.station_code == "UH4" and \
               pick.phase_hint == "P":
                pick.time -= 0.005

        # read expected OBS file output
        filename = get_example_file("nlloc.obs")
        with open(filename, "rb") as fh:
            expected = fh.read().decode()

        # write via plugin
        with NamedTemporaryFile() as tf:
            cat.write(tf, format="NLLOC_OBS")
            tf.seek(0)
            got = tf.read().decode()

        self.assertEqual(expected, got)

        # write manually
        with NamedTemporaryFile() as tf:
            write_nlloc_obs(cat, tf)
            tf.seek(0)
            got = tf.read().decode()

        self.assertEqual(expected, got)
Example #23
 def test_str(self):
     """
     Testing the __str__ method of the Catalog object.
     """
     catalog = read_events()
     self.assertTrue(catalog.__str__().startswith("3 Event(s) in Catalog:"))
     self.assertTrue(catalog.__str__().endswith("37.736 | 3.0 ML | manual"))
Example #24
    def test_read_and_write_multiple_events_from_bytes_io(self):
        """
        Tests that reading and writing a CMTSOLUTION file with multiple
        events does not change anything.

        This time it tests reading from and writing to BytesIO objects.
        """
        filename = os.path.join(self.datapath, "MULTIPLE_EVENTS")
        with open(filename, "rb") as fh:
            buf = io.BytesIO(fh.read())
            data = buf.read()
            buf.seek(0, 0)

        with buf:
            buf.seek(0, 0)
            cat = obspy.read_events(buf)

            self.assertEqual(len(cat), 4)

            with io.BytesIO() as buf2:
                cat.write(buf2, format="CMTSOLUTION")
                buf2.seek(0, 0)
                new_data = buf2.read()

        self.assertEqual(data.decode().splitlines(),
                         new_data.decode().splitlines())
Example #25
 def test_large_negative_longitude(self):
     event = full_test_event()
     event.origins[0].longitude = -120
     with NamedTemporaryFile(suffix=".out") as tf:
         event.write(tf.name, format="NORDIC")
         event_back = read_events(tf.name)
         _assert_similarity(event, event_back[0])
Example #26
    def test_read_and_write_cmtsolution_from_files(self):
        """
        Tests that reading and writing a CMTSOLUTION file does not change
        anything.
        """
        filename = os.path.join(self.datapath, "CMTSOLUTION")
        with open(filename, "rb") as fh:
            data = fh.read()

        cat = obspy.read_events(filename)

        with NamedTemporaryFile() as tf:
            temp_filename = tf.name

        try:
            cat.write(temp_filename, format="CMTSOLUTION")
            with open(temp_filename, "rb") as fh:
                new_data = fh.read()
        finally:
            try:
                os.remove(temp_filename)
            except:
                pass

        self.assertEqual(data.decode().splitlines(),
                         new_data.decode().splitlines())
Example #27
    def test_read_and_write(self):
        filename = os.path.join(self.path, 'qml-example-1.2-RC3_write.sc3ml')
        catalog = read_events(filename)

        with NamedTemporaryFile() as tf:
            catalog.write(tf, format='SC3ML', validate=True)
            self.assertTrue(filecmp.cmp(filename, tf.name))
Example #28
    def test_write_cnv(self):
        """
        Test writing CNV catalog summary file.
        """
        # load QuakeML file to generate CNV file from it
        filename = os.path.join(self.datapath, "obspyck_20141020150701.xml")
        cat = read_events(filename, format="QUAKEML")

        # read expected OBS file output
        filename = os.path.join(self.datapath, "obspyck_20141020150701.cnv")
        with open(filename, "rb") as fh:
            expected = fh.read().decode()

        # write via plugin
        with NamedTemporaryFile() as tf:
            cat.write(tf, format="CNV")
            tf.seek(0)
            got = tf.read().decode()

        self.assertEqual(expected, got)

        # write manually
        with NamedTemporaryFile() as tf:
            _write_cnv(cat, tf)
            tf.seek(0)
            got = tf.read().decode()

        self.assertEqual(expected, got)
Example #29
 def test_event_copying_does_not_raise_duplicate_resource_id_warning(self):
     """
     Tests that copying an event does not raise a duplicate resource id
     warning.
     """
     ev = read_events()[0]
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         ev2 = copy.copy(ev)
         self.assertEqual(len(w), 0)
         ev3 = copy.deepcopy(ev)
         self.assertEqual(len(w), 0)
     # The two events should compare equal.
     self.assertEqual(ev, ev2)
     self.assertEqual(ev, ev3)
     # get resource_ids and referred objects from each of the events
     rid1 = ev.resource_id
     rid2 = ev2.resource_id
     rid3 = ev3.resource_id
     rob1 = rid1.get_referred_object()
     rob2 = rid2.get_referred_object()
     rob3 = rid3.get_referred_object()
     # A shallow copy should just use the exact same resource identifier,
     # while a deep copy should not, although they should be equal.
     self.assertIs(rid1, rid2)
     self.assertIsNot(rid1, rid3)
     self.assertEqual(rid1, rid3)
     # copy should point to the same object, deep copy should not
     self.assertIs(rob1, rob2)
     self.assertIsNot(rob1, rob3)
     # although the referred objects should be equal
     self.assertEqual(rob1, rob3)
Example #30
def test_get_synthetics():
    # Try to load 3 stations, out of which 2 are in range for P
    db = instaseis.open_db('syngine://prem_a_20s')
    cat = obspy.read_events('./stfinv/data/virginia.xml')
    st = read('./stfinv/data/dis.II.BFO.00.BHZ')
    st += read('./stfinv/data/dis.GE.DAG..BHZ')
    st += read('./stfinv/data/dis.G.CRZF.00.BHZ')
    st_data, st_syn = st.get_synthetics(db=db, origin=cat[0].origins[0],
                                        out_dir='/tmp')

    npt.assert_equal(len(st_data), 2)
    npt.assert_equal(len(st_syn), 12)

    for istat in range(0, 2):
        channels = ['MPP', 'MRP', 'MRR', 'MRT', 'MTP', 'MTT']
        for channel in channels:
            st_test = st_syn.select(station=st_data[istat].stats.station,
                                    network=st_data[istat].stats.network,
                                    location=st_data[istat].stats.location,
                                    channel=channel)
            npt.assert_equal(len(st_test), 1)

        for tr in st_syn[istat * 6:(istat + 1) * 6]:
            npt.assert_string_equal(str(tr.stats.station),
                                    str(st_data[istat].stats.station))
            npt.assert_string_equal(str(tr.stats.location),
                                    str(st_data[istat].stats.location))
            npt.assert_string_equal(str(tr.stats.network),
                                    str(st_data[istat].stats.network))

            npt.assert_equal(tr.stats.npts, st_data[istat].stats.npts)
            npt.assert_allclose(tr.stats.delta, st_data[istat].stats.delta)
            npt.assert_allclose(float(tr.stats.starttime),
                                float(st_data[istat].stats.starttime))
Example #31
    def __init__(self,
                 phase,
                 rot,
                 evtloc,
                 statloc,
                 rawloc,
                 preproloc,
                 rfloc,
                 deconmeth,
                 starttime,
                 endtime,
                 wavdownload=True,
                 pol: str = 'v',
                 minmag: float or int = 5.5,
                 event_coords=None,
                 network=None,
                 station=None,
                 waveform_client=None,
                 re_client=['IRIS'],
                 evtcat=None,
                 debug=False):
        """
        Create object that is used to start the receiver function
        workflow.

        :param phase: Arrival phase that is to be used as source phase.
            "S" to create S-Sp receiver functions and "P" for P-Ps receiver
            functions, "SKS" or "ScS" are allowed as well.
        :type phase: str
        :param rot: The coordinate system into which the seismogram should be
            rotated prior to deconvolution. Options are "RTZ" for radial,
            transverse, vertical; "LQT" for an orthogonal coordinate system
            computed by minimising primary energy on the converted component;
            or "PSS" for a rotation along the polarisation directions using
            the Litho1.0 surface wave tomography model.
        :type rot: str
        :param evtloc: Directory, in which to store the event catalogue (xml).
        :type evtloc: str
        :param statloc: Directory, in which to store the station inventories
                        (xml).
        :type statloc: str
        :param rawloc: Directory, in which to store the raw waveform data.
        :type rawloc: str
        :param preproloc: Directory, in which to store
            the preprocessed waveform data (mseed).
        :type preproloc: str
        :param rfloc: Directory, in which to store the receiver functions in
            time domain (sac).
        :type rfloc: str
        :param deconmeth: The deconvolution method to use for the RF creation.
            Possible options are:
            'it': iterative time domain deconvolution (Ligorria & Ammon, 1999)
            'dampedf': damped frequency deconvolution
            'fqd': frequency dependent damping - not a good choice for SRF
            'waterlevel': Langston (1977)
            'multit': for multitaper (Helffrich, 2006)
            False/None: don't create RFs
        :type deconmeth: str
        :param starttime: Earliest event date to be considered.
        :type starttime: ~obspy.UTCDateTime
        :param endtime: Latest event date to be considered.
        :type endtime: ~obspy.UTCDateTime
        :param wavdownload: Start a new download or update the current
            database (True), or only preprocess and create RFs from an
            existing database (False). False is a lot faster as all CPUs can
            be used and the preprocessing does not have to wait for the
            download, defaults to True.
        :type wavdownload: bool, optional
        :param pol: Polarisation to use as source wavelet. Either "v" for
            vertically polarised or 'h' for horizontally polarised S-waves.
            Will be ignored if phase='S', by default 'v'.
        :type pol: str, optional
        :param minmag: Minimum magnitude, by default 5.5
        :type minmag: float, optional
        :param event_coords: In case you wish to constrain events to certain
            origins. Given in the form (minlat, maxlat, minlon, maxlon),
            by default None.
        :type event_coords: Tuple, optional
        :param network: Limit the download and preprocessing to a certain
            network or several networks (if type==list).
            Wildcards are allowed, defaults to None.
        :type network: str or list, optional
        :param station: Limit the download and preprocessing to a certain
            station or several stations. Use only if network!=None.
            Wildcards are allowed, by default None.
        :type station: str or list, optional
        :param waveform_client: List of FDSN compatible servers to download
            waveforms from.
            See obspy documentation for obspy.Client for allowed acronyms.
            A list of servers by region can be found at
            `<https://www.fdsn.org/webservices/datacenters/>`_. None means
            that all known servers are requested, defaults to None.
        :type waveform_client: list, optional
        :param re_client: Only relevant when debug=True. List of servers that
            will be used if data is missing and the script will attempt a
            redownload, usually it's easier to just run a request several
            times. Same logic as for waveform_client applies,
            defaults to ['IRIS']
        :type re_client: list, optional
        :param evtcat: In case you want to use an already existing event
            catalogue in evtloc. If None, a new catalogue will be downloaded
            (with the parameters defined before), defaults to None.
        :type evtcat: str, optional
        :param debug: If True, all loggers will go to DEBUG mode and all
            warnings will be shown. That will result in a lot of information
            being shown! Also, joblib will fall back to using only a few
            cores, defaults to False.
        :type debug: bool, optional
        :raises NameError: For invalid phases.
        """

        # Allocate variables in self
        self.debug = debug
        self.wavdownload = wavdownload
        tmp.re_client = re_client

        # Set velocity model
        self.model = TauPyModel('iasp91')

        self.phase = phase[:-1] + phase[-1].upper()
        self.pol = pol.lower()
        self.rot = rot.upper()
        self.deconmeth = deconmeth

        # Directories
        self.logdir = os.path.join(os.path.dirname(os.path.abspath(statloc)),
                                   'logs')
        os.makedirs(self.logdir, exist_ok=True)
        self.evtloc = evtloc
        self.statloc = statloc
        self.rawloc = os.path.join(rawloc, self.phase)
        self.preproloc = os.path.join(preproloc, self.phase)
        self.rfloc = os.path.join(rfloc, self.phase)

        # minimum magnitude
        self.minmag = minmag

        # Request time window
        self.starttime = starttime
        self.endtime = endtime

        # geographical constraints
        if event_coords:
            (self.eMINLAT, self.eMAXLAT, self.eMINLON,
             self.eMAXLON) = event_coords
        else:
            (self.eMINLAT, self.eMAXLAT, self.eMINLON,
             self.eMAXLON) = None, None, None, None

        # Set event depth and min/max epicentral distances
        # according to phase (see Wilson et. al., 2006)
        # and time window before (tz) and after (ta) first arrival
        self.ta = 120
        if self.phase == 'P':
            self.maxdepth = None
            self.min_epid = 28.1
            self.max_epid = 95.8
            self.tz = 30
        elif self.phase == 'S':
            self.maxdepth = 300
            self.min_epid = 55
            self.max_epid = 80
            self.tz = 120
        # (see Yuan et al. 2006)
        elif self.phase.upper() == 'SCS':
            self.maxdepth = 300
            self.min_epid = 50
            self.max_epid = 75
            self.tz = 120
        elif self.phase.upper() == 'SKS':
            # (see Zhang et. al. (2014))
            self.maxdepth = 300
            self.min_epid = 90
            self.max_epid = 120
            self.tz = 120
        else:
            raise NameError(
                'The phase {} is not valid or not implemented yet.'.format(
                    self.phase))

        # network and station filters
        self.network = network
        self.station = station

        # Server settings
        # 2021/02/16 Events only from IRIS as the USGS webservice tends to be
        # unstable and mixing different services will lead to a messed-up db
        self.webclient = Webclient('IRIS')

        self.waveform_client = waveform_client
        self.re_client = re_client

        # Download or process available data?
        if evtcat:
            self.evtcat = read_events(os.path.join(self.evtloc, evtcat))
        else:
            self.download_eventcat()
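The if/elif ladder above hard-codes a distance/depth window per source phase; the same information can be written as a lookup table. This is only a sketch of an equivalent structure, not part of the original class:

PHASE_SETTINGS = {
    'P':   dict(maxdepth=None, min_epid=28.1, max_epid=95.8, tz=30),
    'S':   dict(maxdepth=300,  min_epid=55,   max_epid=80,   tz=120),
    'SCS': dict(maxdepth=300,  min_epid=50,   max_epid=75,   tz=120),
    'SKS': dict(maxdepth=300,  min_epid=90,   max_epid=120,  tz=120),
}


def phase_settings(phase):
    """Return the distance/depth window for a source phase (values copied
    from the branches above)."""
    try:
        return PHASE_SETTINGS[phase.upper()]
    except KeyError:
        raise NameError(
            'The phase {} is not valid or not implemented yet.'.format(phase))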
Example #32
 def test_catalog_resource_id(self):
     """
     See #662
     """
     cat = read_events(self.neries_xml)
     assert str(cat.resource_id) == r"smi://eu.emsc/unid"
Example #33
def mktemplates(
        network_code='GEONET',
        publicIDs=['2016p008122', '2016p008353', '2016p008155',
                   '2016p008194']):
    """Functional wrapper to make templates"""

    from collections import Counter
    from eqcorrscan.core import template_gen

    # This import section copes with namespace changes between obspy versions
    import obspy
    if int(obspy.__version__.split('.')[0]) >= 1:
        from obspy.clients.fdsn import Client
        from obspy import read_events
    else:
        from obspy.fdsn import Client
        from obspy import readEvents as read_events
    from obspy.core.event import Catalog

    # We want to download some QuakeML files from the New Zealand GeoNet
    # network. GeoNet currently doesn't support FDSN event queries, so we
    # have to work around this and download QuakeML from their quakeml.geonet
    # site.

    client = Client(network_code)
    # We want to download a few events from an earthquake sequence; these are
    # identified by publicID numbers, given as arguments.

    catalog = Catalog()
    for publicID in publicIDs:
        if network_code == 'GEONET':
            data_stream = client._download('http://quakeml.geonet.org.nz/' +
                                           'quakeml/1.2/' + publicID)
            data_stream.seek(0, 0)
            catalog += read_events(data_stream, format="quakeml")
            data_stream.close()
        else:
            catalog += client.get_events(eventid=publicID,
                                         includearrivals=True)

    # Let's plot the catalog to see what we have
    catalog.plot(projection='local', resolution='h')

    # We don't need all the picks; let's take the information from the
    # five most-used stations
    all_picks = []
    for event in catalog:
        all_picks += [(pick.waveform_id.station_code) for pick in event.picks]
    all_picks = Counter(all_picks).most_common(5)
    all_picks = [pick[0] for pick in all_picks]

    for event in catalog:
        if len(event.picks) == 0:
            raise IOError('No picks found')
        event.picks = [
            pick for pick in event.picks
            if pick.waveform_id.station_code in all_picks
        ]

    # Now we can generate the templates
    templates = template_gen.from_client(catalog=catalog,
                                         client_id=network_code,
                                         lowcut=2.0,
                                         highcut=9.0,
                                         samp_rate=20.0,
                                         filt_order=4,
                                         length=3.0,
                                         prepick=0.15,
                                         swin='all',
                                         debug=1,
                                         plot=True)

    # We now have a series of templates! Using ObsPy's Stream.write() method
    # we can save these to disk for later use. We will do that now for use in
    # the following tutorials.
    for i, template in enumerate(templates):
        template.write('tutorial_template_' + str(i) + '.ms', format='MSEED')
        # Note that this will warn you about data types.  As we don't care
        # at the moment, whatever obspy chooses is fine.
    return
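The pick-thinning step above keeps only picks recorded at the five most-used stations. The core of that logic, pulled out as a small sketch (the bundled example catalog used here may contain no picks at all, in which case nothing is removed):

from collections import Counter
import obspy

catalog = obspy.read_events()            # any Catalog with picks works here
station_counts = Counter(
    pick.waveform_id.station_code for event in catalog for pick in event.picks)
keep = {station for station, _ in station_counts.most_common(5)}
for event in catalog:
    event.picks = [p for p in event.picks if p.waveform_id.station_code in keep]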
Example #34
                            longitude=args.lon,
                            maxradius=args.radius,
                            mindepth=args.min_dep,
                            maxdepth=args.max_dep)
except FDSNNoDataException:
    sys.exit('No events within given constraints.')

for eve in cat:
    # Get focal mechanisms.
    event_id = str(eve.preferred_origin_id).split('/')[4]
    ev = get_event_by_id(event_id)
    print("EVENT ID: {}".format(event_id))
    try:
        mt = ev.getProducts('moment-tensor')[0]
        qml = mt.getContentURL('quakeml.xml')
        ev = read_events(qml)[0]
        nps = ev.focal_mechanisms[0].nodal_planes
    except AttributeError:
        nps = 0
    # Print event information.
    timestring = eve.origins[0].time.strftime("%Y-%m-%d %H:%M:%S")
    locstring = "Lat: {:.2f}, Lon: {:.2f}, Depth: {:.2f}".format(
        eve.origins[0].latitude, eve.origins[0].longitude,
        eve.origins[0].depth / 1000)
    if nps:
        magstring = "Magnitude: {:.2f} {}\nNodal Planes (S, D, R): ({},{},{}), ({},{},{})".format(
            eve.magnitudes[0].mag, eve.magnitudes[0].magnitude_type,
            nps.nodal_plane_1.strike, nps.nodal_plane_1.dip,
            nps.nodal_plane_1.rake, nps.nodal_plane_2.strike,
            nps.nodal_plane_2.dip, nps.nodal_plane_2.rake)
    else:
Example #35
 def events_from_catalog(self):
     """ read events from a events object """
     cat = obspy.read_events()
     return events_to_df(cat)
Example #36
 def test_cat_to_df_method(self):
     """ ensure the events object has the to_df method bolted on """
     cat = obspy.read_events()
     df = cat.to_df()
     assert isinstance(df, pd.DataFrame)
     assert len(df) == len(cat)
Example #37
x, y = m(lon, lat)

lon, lat, layer = LMC.get_slice(piercedepth, whattoplot='votesslow')
x, y = m(lon.ravel(), lat.ravel())
x = x.reshape(np.shape(layer))
y = y.reshape(np.shape(layer))
minval = np.min(np.min(layer))
maxval = np.max(np.max(layer)) + .1

#m.shadedrelief()
m.drawcoastlines()
m.drawcountries()

dir = '/raid1/zl382/Data/' + Location + '/' + event + '/'
location_dict = np.load(dir + 'STALOCATION.npy').item()
cat = obspy.read_events(dir + 'CMTSOLUTION')
elat = cat[0].origins[0].latitude
elon = cat[0].origins[0].longitude
edepth = cat[0].origins[0].depth / 1000

pierce1lon = []
pierce1lat = []
pierce2lon = []
pierce2lat = []
dt = []

######
for s, (s_name, (dist, azi, slat, slon,
                 sazi)) in enumerate(location_dict.items()):

    if azi<az_min or azi>az_max \
Example #38
def get_event_by_id(event_id, fdsn_dc, scratch_dir, log_file, gcmt_dc=None):
    """From FDSN get an event based on ID and then get the corresponding GCMT event, if any"""

    fdsn_query = f'eventid={event_id}&nodata=404'
    _url = f'{event_service_url[fdsn_dc]}{fdsn_query}'
    print(f'[INFO] Requesting:\n{_url}', flush=True, file=log_file)
    try:
        catalog = read_events(_url)
    except Exception as er:
        print(f'[ERR] Request failed\n{er}', flush=True, file=log_file)
        sys.exit(1)
    fdsn_event = get_catalog_event(catalog[0], log_file)
    if gcmt_dc is None:
        return fdsn_event

    fdsn_origin = fdsn_event['origin']
    search_start = fdsn_origin.time - timedelta(
        seconds=association_threshold['seconds'], minutes=0, hours=0,
        days=0) * association_threshold['gcmt_factor']
    search_end = fdsn_origin.time + timedelta(
        seconds=association_threshold['seconds'], minutes=0, hours=0,
        days=0) * association_threshold['gcmt_factor']
    s_y, s_m, s_d = search_start.strftime('%Y-%m-%d').split('-')
    e_y, e_m, e_d = search_end.strftime('%Y-%m-%d').split('-')

    # GCMT form is not very sensitive to lat/lon, need to increase the threshold for search.
    # Watch for validity.
    lat_min = fdsn_origin.latitude - association_threshold[
        'latitude'] * association_threshold['gcmt_factor']
    if lat_min < -90.0:
        lat_min = -90.0
    lat_max = fdsn_origin.latitude + association_threshold[
        'latitude'] * association_threshold['gcmt_factor']
    if lat_max > 90.0:
        lat_max = 90.0

    lon_min = fdsn_origin.longitude - association_threshold[
        'longitude'] * association_threshold['gcmt_factor']
    if lon_min < -180.0:
        lon_min = -180.0
    lon_max = fdsn_origin.longitude + association_threshold[
        'longitude'] * association_threshold['gcmt_factor']
    if lon_max > 180.0:
        lon_max = 180.0

    depth_min = (
        fdsn_origin.depth / 1000.0
    ) - association_threshold['depth'] * association_threshold['gcmt_factor']
    if depth_min < 0:
        depth_min = 0.0
    depth_max = (
        fdsn_origin.depth / 1000.0
    ) + association_threshold['depth'] * association_threshold['gcmt_factor']

    mag_min = fdsn_event['magnitude'].mag - association_threshold['magnitude']
    mag_max = fdsn_event['magnitude'].mag + association_threshold['magnitude']

    gcmt_query = (
        f'itype=ymd&yr={s_y}&mo={int(s_m)}&day={int(s_d)}'
        f'&oyr={e_y}&omo={int(e_m)}&oday={int(e_d)}&jyr=1976&jday=1&ojyr=1976'
        f'&ojday=1&otype=ymd&nday=1&lmw={mag_min}&umw={mag_max}&lms=0&ums=10&lmb=0&umb=10&llat={lat_min}'
        f'&ulat={lat_max}&llon={lon_min}&ulon={lon_max}'
        f'&lhd={depth_min}&uhd={depth_max}&lts=-9999&uts=9999&lpe1=0'
        f'&upe1=90&lpe2=0&upe2=90&list=4')
    _url = f'{event_service_url[gcmt_dc]}{gcmt_query}'
    start_flag = 0
    gcmt_events = ''
    while start_flag is not None:
        if start_flag == 0:
            these_events, start_flag = retrieve_gcmt_events(_url, log_file)
        else:
            these_events, start_flag = retrieve_gcmt_events(
                f'{_url}&start={start_flag}', log_file)

        if not gcmt_events:
            gcmt_events = these_events
        else:
            gcmt_events = f'{gcmt_events}\n{these_events}'

    if gcmt_events is None:
        print(f'[WARN] No GCMT events found!', flush=True, file=log_file)
        return fdsn_event, None
    else:
        tf_name = os.path.join(scratch_dir, f'{str(uuid.uuid4())}.txt')
        with open(tf_name, 'w') as fp:
            fp.write(gcmt_events)
        fp.close()
        catalog = obspy.read_events(tf_name)
        os.remove(tf_name)

        if catalog:
            for cat in catalog:
                _gcmt_event = get_catalog_event(cat, log_file)

                # Make sure event time is acceptable.
                if search_start <= _gcmt_event['datetime'] <= search_end:
                    _tensor = _gcmt_event[
                        'focal_mechanism'].moment_tensor.tensor
                    gcmt_event = _gcmt_event
                    gcmt_event['mt'] = [
                        _tensor.m_rr, _tensor.m_tt, _tensor.m_pp, _tensor.m_rt,
                        _tensor.m_rp, _tensor.m_tp
                    ]
                    break
                else:
                    gcmt_event = None
            if gcmt_event is None:
                print(f'[WARN] No GCMT events returned!',
                      flush=True,
                      file=log_file)
                return fdsn_event, None
        else:
            print(f'[WARN] No GCMT events returned!',
                  flush=True,
                  file=log_file)
            return fdsn_event, None

    gcmt_query = gcmt_query.replace('list=4', 'list=2')
    gcmt_id = gcmt_event['id']

    _url = f'{event_service_url[gcmt_dc]}{gcmt_query}'
    print(f'[INFO] Requesting: {_url}', flush=True, file=log_file)
    strikes = retrieve_gcmt_events(_url, log_file, eid=gcmt_id)
    print(f'[INFO] Strikes are {strikes}', flush=True, file=log_file)
    gcmt_event['strikes'] = strikes

    return fdsn_event, gcmt_event
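The repeated latitude/longitude/depth range checks above are all clamping operations; a small helper expressing the same idea (a sketch, not part of the original module):

def clamp(value, lower, upper):
    """Constrain a coordinate or depth to its valid range."""
    return max(lower, min(upper, value))


print(clamp(-95.2, -90.0, 90.0))            # -> -90.0 (latitude floor)
print(clamp(-3.0, 0.0, float('inf')))       # -> 0.0 (depth has only a floor)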
Example #39
            max_npts = int(np.max(npts))
            for _tr in synt:
                _tr.data.resize(max_npts)

        # Process the synthetics
        lutils.print_action("Processing synthetics")
        processdict["remove_response_flag"] = False
        processdict["rotate_flag"] = False
        psynt = lseis.multiprocess_stream(synt,
                                          processdict,
                                          nprocs=nprocs,
                                          pool=pool)

        window_dict = dict(station=inv,
                           event=read_events(eventfile)[0],
                           _verbose=True,
                           config_dict=dict(
                               config={
                                   "min_period": 150.0,
                                   "max_period": 300.0,
                                   "stalta_waterlevel": 0.085,
                                   "tshift_acceptance_level": 40.0,
                                   "tshift_reference": 0.0,
                                   "dlna_acceptance_level": 0.75,
                                   "dlna_reference": 0.0,
                                   "cc_acceptance_level": 0.85,
                                   "s2n_limit": 3.0,
                                   "s2n_limit_energy": 3.0,
                                   "window_signal_to_noise_type": "amplitude",
                                   "selection_mode": "surface_waves",
Example #40
            print('Event count after ' + i + ' = ' + str(cat_filt.count()))
        else:
            cat_filt = cat_filt.filter(i)
            print('Event count after ' + i + ' = ' + str(cat_filt.count()))

    print('Original number of events = ' + str(cat.count()) + '\n' +
          'Number of events after filter = ' + str(cat_filt.count()) + '\n' +
          'Number of events filtered = ' +
          str((cat.count() - cat_filt.count())) + '\n')

    return cat_filt


# Load QML file into obspy catalog
print('Importing Catalog...')
cat = read_events(os.path.join(in_dir + in_file))

if filt_events:
    cat_filt = filter_cat(cat)
    # export filtered catalog in QML format
    cat_filt.write(outqml_dir + outqml_name, format="QUAKEML")
    cat = cat_filt

# make list of event times, lon, lat, depth, mag
evt_list = []
index = 0
for evt in cat:
    evt_list.append([])
    evt_list[index].append(evt.origins[0].time)
    evt_list[index].append(evt.origins[0].longitude)
    evt_list[index].append(evt.origins[0].latitude)
Example #41
def get_gcmt_events(start_str,
                    end_str,
                    lat_limits,
                    lon_limits,
                    min_mag,
                    log_file,
                    max_depth=1000,
                    min_depth=0,
                    gcmt_dc='GCMT',
                    scratch_dir=None,
                    return_url=False):
    """get GCMT events"""

    start_str = start_str.strip()
    start_str = start_str.replace(' ', 'T')
    if 'T' in start_str:
        start_str = start_str.split('T')[0]

    end_str = end_str.strip()
    end_str = end_str.replace(' ', 'T')
    if 'T' in end_str:
        end_str = end_str.split('T')[0]
    s_y, s_m, s_d = start_str.split('-')
    e_y, e_m, e_d = end_str.split('-')

    gcmt_description = list()
    gcmt_origin = list()
    gcmt_magnitude = list()
    gcmt_focal_mechanism = list()
    gcmt_mt = list()
    gcmt_id = list()

    lat_min = lat_limits[0]
    if lat_min < -90.0:
        lat_min = -90.0

    lat_max = lat_limits[1]
    if lat_max > 90.0:
        lat_max = 90.0

    lon_min = lon_limits[0]
    if lon_min < -180.0:
        lon_min = -180.0

    lon_max = lon_limits[1]
    if lon_max > 180.0:
        lon_max = 180.0

    gcmt_query = (
        f'itype=ymd&yr={s_y}&mo={int(s_m)}&day={int(s_d)}'
        f'&oyr={e_y}&omo={int(e_m)}&oday={int(e_d)}&jyr=1976&jday=1&ojyr=1976'
        f'&ojday=1&otype=ymd&nday=1&lmw={min_mag}&umw=10&lms=0&ums=10&lmb=0&umb=10&llat={lat_min}'
        f'&ulat={lat_max}&llon={lon_min}&ulon={lon_max}'
        f'&lhd={min_depth}&uhd={max_depth}&lts=-9999&uts=9999&lpe1=0'
        f'&upe1=90&lpe2=0&upe2=90&list=4')

    url = f'{event_service_url[gcmt_dc]}{gcmt_query}'
    start_flag = 0
    gcmt_events = None
    while start_flag is not None:
        if start_flag == 0:
            these_events, start_flag = retrieve_gcmt_events(url, log_file)
        else:
            these_events, start_flag = retrieve_gcmt_events(
                f'{url}&start={start_flag}', log_file)

        if gcmt_events is None:
            gcmt_events = these_events
        else:
            gcmt_events = f'{gcmt_events}\n{these_events}'

    if gcmt_events is None:
        print(f'[WARN] No GCMT events found!', flush=True, file=log_file)
        gcmt_event = None
    else:
        tf_name = os.path.join(scratch_dir, f'{str(uuid.uuid4())}.txt')
        # print(f'[INFO] TF:{tf_name}')
        with open(tf_name, 'w') as fp:
            fp.write(gcmt_events)
        fp.close()
        catalog = obspy.read_events(tf_name)
        os.remove(tf_name)

        for cat in catalog:
            this_event = get_catalog_event(cat, log_file)
            gcmt_id.append(cat.resource_id.id)
            gcmt_description.append(this_event['description'])
            gcmt_origin.append(this_event['origin'])
            gcmt_magnitude.append(this_event['magnitude'])
            gcmt_focal_mechanism.append(this_event['focal_mechanism'])
            tensor = this_event['focal_mechanism'].moment_tensor.tensor
            gcmt_mt.append([
                tensor.m_rr, tensor.m_tt, tensor.m_pp, tensor.m_rt,
                tensor.m_rp, tensor.m_tp
            ])
        gcmt_event = {
            'description': gcmt_description,
            'origin': gcmt_origin,
            'magnitude': gcmt_magnitude,
            'focal_mechanism': gcmt_focal_mechanism,
            'mt': gcmt_mt,
            'id': gcmt_id
        }

    if return_url:
        return gcmt_event, url
    else:
        return gcmt_event
Example #42
#!/usr/bin/env python
import sys
sys.path.insert(0, '/home/chet/EQcorrscan')

"""Creating the input files for HypoDD from nlloc relocated catalog"""
import numpy as np
import os
from collections import Counter
from obspy import read_events, read, UTCDateTime
from glob import glob
from obspy.core.event import ResourceIdentifier
from eqcorrscan.utils import catalog_to_dd

cat = read_events('/media/chet/hdd/seismic/NZ/catalogs/2015_dets_nlloc/2015_dets_nlloc_Sherburn.xml')

# Need to deal with events which have origins
cat.events = [ev for ev in cat if len(ev.origins) != 0]

# Remove dup events by rounding to nearest second and removing duplicates
times = [(i, np.round(float(ev.preferred_origin().time.strftime('%Y%m%d%H%M%S.%f')))) for i, ev in enumerate(cat)]
seen = {}
result = []
for time in times:
    if time[1] in seen:
        continue
    else:
        seen[time[1]] = 1
        result.append(time)
indices, times = zip(*result)
cat.events = [ev for i, ev in enumerate(cat) if i in indices]
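The seen-dict bookkeeping above de-duplicates events whose preferred-origin times round to the same second. The same rule as a self-contained helper (a sketch only):

import numpy as np


def deduplicate_by_origin_time(catalog):
    """Keep one event per preferred-origin time rounded to the nearest second."""
    seen, unique = set(), []
    for ev in catalog:
        t = np.round(
            float(ev.preferred_origin().time.strftime('%Y%m%d%H%M%S.%f')))
        if t not in seen:
            seen.add(t)
            unique.append(ev)
    catalog.events = unique
    return catalog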
Example #43
def _assert_catalog(got):
    got_id_prefix = str(got.resource_id).encode('UTF-8')
    # first of all, replace the random hash in our test file with the prefix
    # that was used during reading the ISF file
    with open(path_to_quakeml, 'rb') as fh:
        data = fh.read()
    match = re.search(b'publicID="(smi:local/[a-z0-9-]*)"', data)
    expected_id_prefix = match.group(1)
    data, num_subs = re.subn(expected_id_prefix, got_id_prefix, data)
    # 49 resource id replacements should be done in the QuakeML file we compare
    # against
    assert num_subs == 49
    bio = io.BytesIO(data)
    expected = read_events(bio, format="QUAKEML")
    # now first check if we got the expected number of picks and station
    # magnitudes, because the quakeml file has been stripped of picks and
    # station magnitudes to save space and time
    assert len(got[0].picks) == 255
    assert len(got[0].station_magnitudes) == 15
    # ok now crop the got catalog accordingly, afterwards it should compare
    # equal to our comparison catalog
    got[0].picks = got[0].picks[:4]
    got[0].station_magnitudes = got[0].station_magnitudes[:7]
    # # now we also have to replace comment ids in both catalogs..
    # for cat in (got, expected):
    #     for item in cat.events + cat[0].origins + cat[0].magnitudes + \
    #             cat[0].station_magnitudes + cat[0].picks:
    #         for comment in item.comments:
    #             comment.resource_id = 'smi:local/dummy'

    # some more fixes for the comparison, these are due to buggy QuakeML reader
    # behavior and should be fixed in io.quakeml eventually
    for event in expected:
        for pick in event.picks:
            # QuakeML reader seems to set `network_code=""` if it's not in the
            # xml file.. account for this strange behavior for this test
            pick.waveform_id.network_code = None
            # QuakeML reader seems to add empty QuantityError for
            # pick.horizontal_slowness_errors
            for key in ['horizontal_slowness_errors', 'time_errors',
                        'backazimuth_errors']:
                setattr(pick, key, None)
        for origin in event.origins:
            if origin.origin_uncertainty is not None:
                # QuakeML reader seems to add empty ConfidenceEllipsoid
                origin.origin_uncertainty.confidence_ellipsoid = None
            # QuakeML reader seems to add empty QuantityError for
            # pick.horizontal_slowness_errors
            for key in ['time_errors', 'longitude_errors', 'latitude_errors',
                        'depth_errors']:
                setattr(origin, key, None)
        for station_magnitude in event.station_magnitudes:
            # QuakeML reader seems to set origin_id to
            # `ResourceIdentifier(id="None")`
            # QuakeML reader seems to add empty QuantityError for
            # pick.horizontal_slowness_errors
            for key in ['origin_id', 'mag_errors']:
                setattr(station_magnitude, key, None)
        for magnitude in event.magnitudes:
            # QuakeML reader seems to add empty QuantityError for
            # pick.horizontal_slowness_errors
            for key in ['mag_errors']:
                setattr(magnitude, key, None)
    # now finally these catalogs should compare equal
    assert got == expected
Example #44
#!/usr/bin/env python

"""Simple script to merge a directory of catalogs"""

from glob import glob
import seaborn as sns
from obspy import read_events, Catalog

cat_dir = '/media/chet/hdd/seismic/NZ/catalogs/2015_det2cat/*'
cats = glob(cat_dir)

master_cat = Catalog()
for cat in cats:
    master_cat += read_events(cat)

# Check pick quality distribution for pick exclusion
pk_qual = [float(pk.comments[0].text.split('=')[-1]) for ev in master_cat for pk in ev.picks]
Example #45
 def catalog(self, kem_archive):
     """ return the events """
     return obspy.read_events(
         str(self.base_path / "kemmerer" / "events.xml"))
Example #46
def search_GCMT(date):
    """lookup function for finding events in GCMT catalogs stored internally
    :type date: str format accepted by UTCDateTime
    """
    date = UTCDateTime(date)
    day_before = date - (60 * 60 * 24)
    two_days_before = date - (2 * 60 * 60 * 24)
    day_after = date + (60 * 60 * 24)

    month_dict = {
        4: "apr",
        12: "dec",
        1: "jan",
        6: "jun",
        5: "may",
        10: "oct",
        8: "aug",
        2: "feb",
        7: "jul",
        3: "mar",
        11: "nov",
        9: "sep"
    }
    year = str(date.year)
    month = date.month

    fid = "{m}{y}.ndk".format(m=month_dict[month], y=year[2:])
    filepath = os.path.join(pathnames()['data'], "GCMT", year, fid)

    # files can also be read directly from GCMT website
    gcmt_standard_url = ("https://www.ldeo.columbia.edu/~gcmt/projects/CMT/"
                         "catalog/NEW_MONTHLY/{y}/"
                         "{m}{ys}.ndk".format(y=year,
                                              m=month_dict[month],
                                              ys=year[2:]))
    gcmt_quick_url = ("http://www.ldeo.columbia.edu/~gcmt/projects/CMT/"
                      "catalog/NEW_QUICK/qcmt.ndk")

    try:
        cat = read_events(filepath)
    except FileNotFoundError:
        try:
            print("[getdata.get_GCMT_solution] internal .ndk file not found, "
                  "searching for GCMT standard url")
            cat = read_events(gcmt_standard_url)
        except Exception as e:
            print("[getdata.get_GCMT_solution] standard url not found, "
                  "searching GCMT quick solutions")
            cat = read_events(gcmt_quick_url)
    cat_filt = cat.filter("time > {}".format(str(two_days_before)),
                          "time < {}".format(str(day_after)),
                          "magnitude >= {}".format(7.5))
    if len(cat_filt) == 0:
        print("[getdata.get_GCMT_solution] No events found")
        return None
    elif len(cat_filt) > 1:
        print("[getdata.get_GCMT_solution]"
              " {} events found, choose from list:".format(len(cat_filt)))
        print(MT)
        print(cat_filt)
        choice = int(input("Event number (index from 0): "))
        event = cat_filt[choice]
        return event
    else:
        event = cat_filt[0]
        return event
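
A minimal usage sketch, assuming the module-level imports (`os`, `UTCDateTime`, `read_events`) and the `pathnames` helper are available as in the original module:

# Sketch: look up the GCMT solution around the 2016-11-13 Kaikoura earthquake
event = search_GCMT("2016-11-13T11:02:56")
if event is not None:
    print(event.preferred_magnitude() or event.magnitudes[0])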
Ejemplo n.º 47
0
 def test_df_are_same(self, read_catalog):
     df = events_to_df(obspy.read_events())
     assert (df.columns == read_catalog.columns).all()
     assert len(df) == len(read_catalog)
     assert set(df.time) == set(read_catalog.time)
Ejemplo n.º 48
0
    def _fetch_data(self, waveforms_id=None):
        # Fetch event's traces from ws or cached files
        cat = None
        fetch_from_cache_success = None

        if self.use_cache:
            if os.path.isfile(self.backup_event_file):
                logger.debug(
                    "Fetching event %s from file %s.",
                    self.event.id,
                    self.backup_event_file,
                )
                cat = read_events(self.backup_event_file)
                fetch_from_cache_success = True
            else:
                logger.debug(
                    "Trying to fetch event %s from file. But %s does not exist!",
                    self.event.id,
                    self.backup_event_file,
                )

                fetch_from_cache_success = False

        if not self.use_cache or fetch_from_cache_success is not True:
            logger.info("Fetching event %s from FDSN-WS.", self.event.id)
            cat = self.get_event()

        if not cat:
            logger.error("No event found !")
            return

        try:
            self.event.qml = cat.events[0]
        except Exception as e:
            logger.error(e)
            return

        (
            self.event.latitude,
            self.event.longitude,
            self.event.depth,
        ) = self.get_event_coordinates(self.event.qml)
        self.event.T0 = self.get_event_time(self.event.qml)
        self.event.event_type = self.get_event_type(self.event.qml)

        if waveforms_id:
            self.waveforms_id = waveforms_id
        else:
            self.waveforms_id = self._hack_streams(
                self.get_event_waveforms_id(self.event.qml))
            self.show_pick_offet(self.event.qml)

        # Set time window for trace extraction
        self._set_extraction_time_window()

        # Fetch traces from ws or cached file
        for w in self.black_listed_waveforms_id:
            try:
                self.waveforms_id.remove(w)
            except Exception:
                pass

        fetch_from_cache_success = None
        if self.use_cache:
            if os.path.isfile(self.backup_traces_file):
                logger.info("Fetching traces from cached file %s.",
                            self.backup_traces_file)
                with open(self.backup_traces_file, "rb") as fp:
                    self.st = cPickle.load(fp,
                                           fix_imports=True,
                                           encoding="ASCII",
                                           errors="strict")

                # remove black listed waveform_id
                for w in self.black_listed_waveforms_id:
                    for tr in self.st.select(id=w):
                        self.st.remove(tr)
                fetch_from_cache_success = True
            else:
                logger.info(
                    "Trying to fetch traces from cached file, but %s does not exist!",
                    self.backup_traces_file,
                )
                fetch_from_cache_success = False

        if not self.use_cache or fetch_from_cache_success is not True:
            logger.info("Fetching traces from FDSN-WS.")
            self.st = self.get_trace(self.starttime, self.endtime)

        if not self.st:
            logger.warning("No traces !")
Ejemplo n.º 49
0
instaseisDB = f.read()
db = instaseis.open_db(instaseisDB)
print(db)

##############################################
## CALCULATE SYNTHETICS AND OUTPUT MAT FILE ##
##############################################

## SETUP FOLDER PATHS
f = open("event_name.txt", "r")
evnum = f.read()
synthDir = "./" + evnum + "_synth/"

## READ EVENT INFORMATION FROM GCMT ##
cat = obs.read_events(
    "https://www.ldeo.columbia.edu/~gcmt/projects/CMT/catalog/NEW_QUICK/E" +
    evnum + "A.ndk")
ref_time = cat[0].origins[0].time.timestamp
cmt_time = cat[0].origins[1].time.timestamp
shift_time = cmt_time - ref_time

## MAKE SYNTHETICS DIRECTORY ##
if not os.path.exists(synthDir):
    os.makedirs(synthDir)

## LOOP THROUGH STATIONS ##
datadir = "./" + evnum + "/"
fils = os.listdir(datadir)
for fil in fils:
    print(fil)
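
The station loop above is truncated; a minimal sketch of the synthetics step under stated assumptions (the receiver coordinates are hypothetical placeholders that would normally come from each data file's headers, and output is written as SAC rather than the .mat files named in the banner):

import instaseis

# Sketch: build an instaseis source from the GCMT event and compute a single
# vertical-component synthetic for a hypothetical receiver.
sta_lat, sta_lon = -41.28, 174.77  # assumed coordinates, not from the script
src = instaseis.Source.parse(cat[0])
rec = instaseis.Receiver(latitude=sta_lat, longitude=sta_lon,
                         network="XX", station="SYN")
synth = db.get_seismograms(source=src, receiver=rec, components="Z")
synth.write(synthDir + "SYN.Z.sac", format="SAC")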
Ejemplo n.º 50
0
 def test_catalog_plot_global(self, image_path):
     """
     Tests the catalog preview plot, default parameters, using Cartopy.
     """
     cat = read_events()
     cat.plot(method='cartopy', outfile=image_path)
Ejemplo n.º 51
0
def template_gen(method, lowcut, highcut, samp_rate, filt_order,
                 length, prepick, swin="all", process_len=86400,
                 all_horiz=False, delayed=True, plot=False, plotdir=None,
                 return_event=False, min_snr=None, parallel=False,
                 num_cores=False, save_progress=False, skip_short_chans=False,
                 **kwargs):
    """
    Generate processed and cut waveforms for use as templates.

    :type method: str
    :param method:
        Template generation method, must be one of ('from_client',
        'from_seishub', 'from_sac', 'from_meta_file'). Each method requires
        associated arguments, see note below.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will not apply a highcut.
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz.
    :type filt_order: int
    :param filt_order: Filter level (number of corners).
    :type length: float
    :param length: Length of template waveform in seconds.
    :type prepick: float
    :param prepick: Pre-pick time in seconds
    :type swin: str
    :param swin:
        P, S, P_all, S_all or all, defaults to all: see note in
        :func:`eqcorrscan.core.template_gen.template_gen`
    :type process_len: int
    :param process_len: Length of data in seconds to download and process.
    :type all_horiz: bool
    :param all_horiz:
        To use both horizontal channels even if there is only a pick on one of
        them.  Defaults to False.
    :type delayed: bool
    :param delayed: If True, each channel will begin relative to its own
        pick-time; if set to False, each channel will begin at the same time.
    :type plot: bool
    :param plot: Plot templates or not.
    :type plotdir: str
    :param plotdir:
        The path to save plots to. If `plotdir=None` (default) then the figure
        will be shown on screen.
    :type return_event: bool
    :param return_event: Whether to return the event and process length or not.
    :type min_snr: float
    :param min_snr:
        Minimum signal-to-noise ratio for a channel to be included in the
        template, where signal-to-noise ratio is calculated as the ratio of
        the maximum amplitude in the template window to the rms amplitude in
        the whole window given.
    :type parallel: bool
    :param parallel: Whether to process data in parallel or not.
    :type num_cores: int
    :param num_cores:
        Number of cores to try and use, if False and parallel=True, will use
        either all your cores, or as many traces as in the data (whichever is
        smaller).
    :type save_progress: bool
    :param save_progress:
        Whether to save the resulting templates at every data step or not.
        Useful for long-running processes.
    :type skip_short_chans: bool
    :param skip_short_chans:
        Whether to ignore channels that have insufficient length data or not.
        Useful when the quality of data is not known, e.g. when downloading
        old, possibly triggered data from a datacentre

    :returns: List of :class:`obspy.core.stream.Stream` Templates
    :rtype: list

    .. note::
        By convention templates are generated with P-phases on the
        vertical channel and S-phases on the horizontal channels, normal
        seismograph naming conventions are assumed, where Z denotes vertical
        and N, E, R, T, 1 and 2 denote horizontal channels, either oriented
        or not.  To this end we will **only** use Z channels if they have a
        P-pick, and will use one or other horizontal channels **only** if
        there is an S-pick on it.

    .. warning::
        If there is no phase_hint included in picks, and swin=all, all channels
        with picks will be used.

    .. note::
        If swin=all, then all picks will be used, not just phase-picks (e.g. it
        will use amplitude picks). If you do not want this then we suggest
        that you remove any picks you do not want to use in your templates
        before using the event.

    .. note::
        *Method specific arguments:*

        - `from_client` requires:
            :param str client_id:
                string passable by obspy to generate Client, or any object
                with a `get_waveforms` method, including a Client instance.
            :param `obspy.core.event.Catalog` catalog:
                Catalog of events to generate template for
            :param float data_pad: Pad length for data-downloads in seconds
        - `from_seishub` requires:
            :param str url: url to seishub database
            :param `obspy.core.event.Catalog` catalog:
                Catalog of events to generate template for
            :param float data_pad: Pad length for data-downloads in seconds
        - `from_sac` requires:
            :param list sac_files:
                obspy.core.stream.Stream of sac waveforms, or list of paths to
                sac waveforms.
            .. note::
                See `eqcorrscan.utils.sac_util.sactoevent` for details on
                how pick information is collected.
        - `from_meta_file` requires:
            :param str meta_file:
                Path to obspy-readable event file, or an obspy Catalog
            :param `obspy.core.stream.Stream` st:
                Stream containing waveform data for template. Note that this
                should be the same length of stream as you will use for the
                continuous detection, e.g. if you detect in day-long files,
                give this a day-long file!
            :param bool process:
                Whether to process the data or not, defaults to True.

    .. note::
        process_len should be set to the same length as used when computing
        detections using match_filter.match_filter, e.g. if you read
        in day-long data for match_filter, process_len should be 86400.

    .. rubric:: Example

    >>> from obspy.clients.fdsn import Client
    >>> from eqcorrscan.core.template_gen import template_gen
    >>> client = Client('NCEDC')
    >>> catalog = client.get_events(eventid='72572665', includearrivals=True)
    >>> # We are only taking two picks for this example to speed up the
    >>> # example, note that you don't have to!
    >>> catalog[0].picks = catalog[0].picks[0:2]
    >>> templates = template_gen(
    ...    method='from_client', catalog=catalog, client_id='NCEDC',
    ...    lowcut=2.0, highcut=9.0, samp_rate=20.0, filt_order=4, length=3.0,
    ...    prepick=0.15, swin='all', process_len=300, all_horiz=True)
    >>> templates[0].plot(equal_scale=False, size=(800,600)) # doctest: +SKIP

    .. figure:: ../../plots/template_gen.from_client.png

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.core.template_gen import template_gen
    >>> # Get the path to the test data
    >>> import eqcorrscan
    >>> import os
    >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
    >>> st = read(TEST_PATH + '/WAV/TEST_/' +
    ...           '2013-09-01-0410-35.DFDPC_024_00')
    >>> quakeml = TEST_PATH + '/20130901T041115.xml'
    >>> templates = template_gen(
    ...    method='from_meta_file', meta_file=quakeml, st=st, lowcut=2.0,
    ...    highcut=9.0, samp_rate=20.0, filt_order=3, length=2, prepick=0.1,
    ...    swin='S', all_horiz=True)
    >>> print(len(templates[0]))
    10
    >>> templates = template_gen(
    ...    method='from_meta_file', meta_file=quakeml, st=st, lowcut=2.0,
    ...    highcut=9.0, samp_rate=20.0, filt_order=3, length=2, prepick=0.1,
    ...    swin='S_all', all_horiz=True)
    >>> print(len(templates[0]))
    15

    .. rubric:: Example

    >>> from eqcorrscan.core.template_gen import template_gen
    >>> import glob
    >>> # Get all the SAC-files associated with one event.
    >>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
    >>> templates = template_gen(
    ...    method='from_sac', sac_files=sac_files, lowcut=2.0, highcut=10.0,
    ...    samp_rate=25.0, filt_order=4, length=2.0, swin='all', prepick=0.1,
    ...    all_horiz=True)
    >>> print(templates[0][0].stats.sampling_rate)
    25.0
    >>> print(len(templates[0]))
    15
    """
    client_map = {'from_client': 'fdsn', 'from_seishub': 'seishub'}
    assert method in ('from_client', 'from_seishub', 'from_meta_file',
                      'from_sac')
    if not isinstance(swin, list):
        swin = [swin]
    process = True
    if method in ['from_client', 'from_seishub']:
        catalog = kwargs.get('catalog', Catalog())
        data_pad = kwargs.get('data_pad', 90)
        # Group catalog into days and only download the data once per day
        sub_catalogs = _group_events(
            catalog=catalog, process_len=process_len, template_length=length,
            data_pad=data_pad)
        if method == 'from_client':
            client_id = kwargs.get('client_id', None)
            if hasattr(client_id, 'get_waveforms'):
                client = client_id
            elif isinstance(client_id, str):
                client = FDSNClient(client_id)
            else:
                raise NotImplementedError(
                    "client_id must be an FDSN client string, or a Client "
                    "with a get_waveforms method"
                )
            available_stations = []
        else:
            client = SeisHubClient(kwargs.get('url', None), timeout=10)
            available_stations = client.waveform.get_station_ids()
    elif method == 'from_meta_file':
        if isinstance(kwargs.get('meta_file'), Catalog):
            catalog = kwargs.get('meta_file')
        elif kwargs.get('meta_file'):
            catalog = read_events(kwargs.get('meta_file'))
        else:
            catalog = kwargs.get('catalog')
        sub_catalogs = [catalog]
        st = kwargs.get('st', Stream())
        process = kwargs.get('process', True)
    elif method == 'from_sac':
        sac_files = kwargs.get('sac_files')
        if isinstance(sac_files, list):
            if isinstance(sac_files[0], (Stream, Trace)):
                # This is a list of streams...
                st = Stream(sac_files[0])
                for sac_file in sac_files[1:]:
                    st += sac_file
            else:
                sac_files = [read(sac_file)[0] for sac_file in sac_files]
                st = Stream(sac_files)
        else:
            st = sac_files
        # Make an event object...
        catalog = Catalog([sactoevent(st)])
        sub_catalogs = [catalog]

    temp_list = []
    process_lengths = []
    catalog_out = Catalog()

    if "P_all" in swin or "S_all" in swin or all_horiz:
        all_channels = True
    else:
        all_channels = False
    for sub_catalog in sub_catalogs:
        if method in ['from_seishub', 'from_client']:
            Logger.info("Downloading data")
            st = _download_from_client(
                client=client, client_type=client_map[method],
                catalog=sub_catalog, data_pad=data_pad,
                process_len=process_len, available_stations=available_stations,
                all_channels=all_channels)
        Logger.info('Pre-processing data')
        st.merge()
        if len(st) == 0:
            Logger.info("No data")
            continue
        if process:
            data_len = max([len(tr.data) / tr.stats.sampling_rate
                            for tr in st])
            if 80000 < data_len < 90000:
                daylong = True
                starttime = min([tr.stats.starttime for tr in st])
                min_delta = min([tr.stats.delta for tr in st])
                # Cope with the common starttime less than 1 sample before the
                #  start of day.
                if (starttime + min_delta).date > starttime.date:
                    starttime = (starttime + min_delta)
                # Check if this is stupid:
                if abs(starttime - UTCDateTime(starttime.date)) > 600:
                    daylong = False
                starttime = starttime.date
            else:
                daylong = False
            # Check if the required amount of data have been downloaded - skip
            # channels if arg set.
            for tr in st:
                if np.ma.is_masked(tr.data):
                    _len = np.ma.count(tr.data) * tr.stats.delta
                else:
                    _len = tr.stats.npts * tr.stats.delta
                if _len < process_len * .8:
                    Logger.info(
                        "Data for {0} are too short, skipping".format(
                            tr.id))
                    if skip_short_chans:
                        continue
                # Trim to enforce process-len
                tr.data = tr.data[0:int(process_len * tr.stats.sampling_rate)]
            if len(st) == 0:
                Logger.info("No data")
                continue
            if daylong:
                st = pre_processing.dayproc(
                    st=st, lowcut=lowcut, highcut=highcut,
                    filt_order=filt_order, samp_rate=samp_rate,
                    parallel=parallel, starttime=UTCDateTime(starttime),
                    num_cores=num_cores)
            else:
                st = pre_processing.shortproc(
                    st=st, lowcut=lowcut, highcut=highcut,
                    filt_order=filt_order, parallel=parallel,
                    samp_rate=samp_rate, num_cores=num_cores)
        data_start = min([tr.stats.starttime for tr in st])
        data_end = max([tr.stats.endtime for tr in st])

        for event in sub_catalog:
            stations, channels, st_stachans = ([], [], [])
            if len(event.picks) == 0:
                Logger.warning(
                    'No picks for event {0}'.format(event.resource_id))
                continue
            use_event = True
            # Check that the event is within the data
            for pick in event.picks:
                if not data_start < pick.time < data_end:
                    Logger.warning(
                        "Pick outside of data span: Pick time {0} Start "
                        "time {1} End time: {2}".format(
                            str(pick.time), str(data_start), str(data_end)))
                    use_event = False
            if not use_event:
                Logger.error('Event is not within data time-span')
                continue
            # Read in pick info
            Logger.debug("I have found the following picks")
            for pick in event.picks:
                if not pick.waveform_id:
                    Logger.warning(
                        'Pick not associated with waveforms, will not use:'
                        ' {0}'.format(pick))
                    continue
                Logger.debug(pick)
                stations.append(pick.waveform_id.station_code)
                channels.append(pick.waveform_id.channel_code)
            # Check to see if all picks have a corresponding waveform
            for tr in st:
                st_stachans.append('.'.join([tr.stats.station,
                                             tr.stats.channel]))
            # Cut and extract the templates
            template = _template_gen(
                event.picks, st, length, swin, prepick=prepick, plot=plot,
                all_horiz=all_horiz, delayed=delayed, min_snr=min_snr,
                plotdir=plotdir)
            process_lengths.append(len(st[0].data) / samp_rate)
            temp_list.append(template)
            catalog_out += event
        if save_progress:
            if not os.path.isdir("eqcorrscan_temporary_templates"):
                os.makedirs("eqcorrscan_temporary_templates")
            for template in temp_list:
                template.write(
                    "eqcorrscan_temporary_templates{0}{1}.ms".format(
                        os.path.sep, template[0].stats.starttime.strftime(
                            "%Y-%m-%dT%H%M%S")),
                    format="MSEED")
        del st
    if return_event:
        return temp_list, catalog_out, process_lengths
    return temp_list
Ejemplo n.º 52
0
def cat():
    """
    ObsPy Event Catalog for New Zealand based event with
    GeoNet Event ID: 2018p130600
    """
    return read_events("./test_data/test_catalog_2018p130600.xml")
Ejemplo n.º 53
0
def load_cmtsolution(cmtsolution_path):
    cmtsolution = obspy.read_events(cmtsolution_path)[0]
    return cmtsolution
Ejemplo n.º 54
0
    def read(self,
             filename=None,
             read_detection_catalog=True,
             estimate_origin=True):
        """
        Read a Party from a file.

        :type filename: str
        :param filename:
            File to read from - can be a list of files, and can contain
            wildcards.
        :type read_detection_catalog: bool
        :param read_detection_catalog:
            Whether to read the detection catalog or not, if False, catalog
            will be regenerated - for large catalogs this can be faster.
        :type estimate_origin: bool
        :param estimate_origin:
            If True and no catalog is found, or read_detection_catalog is
            False, then new events with origins estimated from the template
            origin time will be created.

        .. rubric:: Example

        >>> Party().read()
        Party of 4 Families.
        """
        from eqcorrscan.core.match_filter.tribe import Tribe

        tribe = Tribe()
        families = []
        if filename is None:
            # If there is no filename given, then read the example.
            filename = os.path.join(os.path.dirname(__file__), '..', '..',
                                    'tests', 'test_data', 'test_party.tgz')
        if isinstance(filename, list):
            filenames = []
            for _filename in filename:
                # Expand wildcards
                filenames.extend(glob.glob(_filename))
        else:
            # Expand wildcards
            filenames = glob.glob(filename)
        for _filename in filenames:
            with tarfile.open(_filename, "r:*") as arc:
                temp_dir = tempfile.mkdtemp()
                arc.extractall(path=temp_dir, members=_safemembers(arc))
            # Read in the detections first, this way, if we read from multiple
            # files then we can just read in extra templates as needed.
            # Read in families here!
            party_dir = glob.glob(temp_dir + os.sep + '*')[0]
            tribe._read_from_folder(dirname=party_dir)
            det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*"))
            if len(det_cat_file) != 0 and read_detection_catalog:
                try:
                    all_cat = read_events(det_cat_file[0])
                except TypeError as e:
                    Logger.error(e)
                    pass
            else:
                all_cat = Catalog()
            for family_file in glob.glob(join(party_dir, '*_detections.csv')):
                template = [
                    t for t in tribe if _templates_match(t, family_file)
                ]
                family = Family(template=template[0] or Template())
                new_family = True
                if family.template.name in [f.template.name for f in families]:
                    family = [
                        f for f in families
                        if f.template.name == family.template.name
                    ][0]
                    new_family = False
                family.detections = _read_family(
                    fname=family_file,
                    all_cat=all_cat,
                    template=template[0],
                    estimate_origin=estimate_origin)
                if new_family:
                    families.append(family)
            shutil.rmtree(temp_dir)
        self.families = families
        return self
Ejemplo n.º 55
0
 def download_events(self):
     """Simply copy events from base directory."""
     cat = obspy.read_events(str(self.source_path / "events.xml"))
     obsplus.EventBank(self.event_path).put_events(cat)
Ejemplo n.º 56
0
 def test_read_events_without_parameters(self):
     """
     Calling read_events w/o any parameter will create an example catalog.
     """
     catalog = read_events()
     assert len(catalog) == 3
Ejemplo n.º 57
0
def event_trials(choice, csv_file, sep_km=None, desired_length=None):
    """
    For a given set of trial parameters, retrieve an event catalog and then
    slim it down with various checks: make sure events are not too closely
    grouped, and that they contain moment tensor information from GeoNet.
    Plot the final catalog, make beachballs for plotting purposes, and write
    an .xml catalog.

    :type choice: str
    :param choice: choice of parameter trial
    :type csv_file: str
    :param csv_file: path to GeoNet csv file with moment tensor information
    :type sep_km: float
    :param sep_km: minimum separation distance (km) to keep between events
    :type desired_length: int
    :param desired_length: desired length of catalog; event removal will try
        to reduce the catalog to this length
    :return:
    """
    trials = {
        "charlie_trial": {
            "name": "charlie_trial",
            "starttime": UTCDateTime("2010-01-01T00:00:00"),
            "endtime": UTCDateTime("2019-11-01T00:00:00"),
            "minmagnitude": 4.75,
            "maxmagnitude": 6.,
            "minlatitude": -42.5,  # LLC
            "minlongitude": 173.5,  # LLC
            "maxlatitude": -37.25,  # URC
            "maxlongitude": 178.5,  # URC
            "maxdepth": 60.
        },
        "fullscale": {
            "name": "fullscale",
            "starttime": UTCDateTime("2000-01-01T00:00:00"),
            "endtime": UTCDateTime("2019-11-01T00:00:00"),
            "minmagnitude": 4.5,
            "maxmagnitude": 6.,
            "minlatitude": -42.5,  # LLC
            "minlongitude": 173.5,  # LLC
            "maxlatitude": -37.25,  # URC
            "maxlongitude": 178.5,  # URC
            "maxdepth": 60.
        },
        "aspen": {
            "name": "aspen",
            "starttime": UTCDateTime("2003-08-20T00:00:00"),  # GeoNet MT cat
            "endtime": UTCDateTime(),
            "minmagnitude": 4.4,
            "maxmagnitude": 6.,
            "minlatitude": -42.5,  # LLC
            "minlongitude": 173.5,  # LLC
            "maxlatitude": -37.0,  # URC
            "maxlongitude": 178.5,  # URC
            "maxdepth": 60.
        },
        "south": {
            "name": "south",
            "starttime": UTCDateTime("2003-08-20T00:00:00"),  # GeoNet MT cat
            "endtime": UTCDateTime(),
            "minmagnitude": 4.5,
            "maxmagnitude": 6.,
            "minlatitude": -48,  # LLC
            "minlongitude": 165,  # LLC
            "maxlatitude": -40.0,  # URC
            "maxlongitude": 175,  # URC
            "maxdepth": 60.
        }
    }

    evnts = trials[choice]
    cat_name = evnts["name"]  # prefix for output catalog files

    # Create the catalog and plot the raw event catalog
    original_cat_fid = f"{cat_name}_original.xml"
    if not os.path.exists(original_cat_fid):
        c = Client("GEONET")
        original_cat = c.get_events(starttime=evnts['starttime'],
                                    endtime=evnts['endtime'],
                                    minmagnitude=evnts['minmagnitude'],
                                    maxmagnitude=evnts['maxmagnitude'],
                                    minlatitude=evnts['minlatitude'],
                                    minlongitude=evnts['minlongitude'],
                                    maxlatitude=evnts['maxlatitude'],
                                    maxlongitude=evnts['maxlongitude'],
                                    maxdepth=evnts['maxdepth'],
                                    orderby="magnitude-asc")
        print("catalog has {} events".format(len(original_cat)))
        # Delete unnecessary attributes to save space
        for attr in ["picks", "amplitudes", "station_magnitudes"]:
            for event in original_cat:
                try:
                    delattr(event, attr)
                except KeyError:
                    continue
        original_cat.write(original_cat_fid, format="QUAKEML")
    else:
        original_cat = read_events(original_cat_fid)

    # Remove if no GeoNet moment tensors
    new_cat = check_moment_tensor(csv_file, original_cat)
    print("catalog has {} events".format(len(new_cat)))

    if sep_km is not None and desired_length is not None:
        cat_out = remove_groupings(new_cat, sep_km=sep_km)
        # Try separation distances until new catalog has desired length
        while True:
            if len(cat_out) == desired_length:
                break
            else:
                print(f"{len(cat_out)} events; "
                      f"desired: {desired_length}; "
                      f"current separation: {sep_km}")
                sep_km = input("new separation distance?: ")
                if sep_km:
                    sep_km = float(sep_km)

            cat_out = remove_groupings(new_cat, sep_km=sep_km)
            print("catalog has {} events".format(len(cat_out)))
    else:
        cat_out = new_cat

    # add moment tensor information to all events
    cat_out_w_mt = append_mt(cat_out, csv_file)

    cat_out_w_mt.write(f"{cat_name}_w_mt.xml", format="QUAKEML")
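
A minimal usage sketch, assuming the GeoNet moment tensor CSV filename below (an assumption) and that the helpers `check_moment_tensor`, `remove_groupings` and `append_mt` are importable as in the original module:

# Sketch: build the "aspen" trial catalog, thinning events closer than an
# assumed 30 km until an assumed 30 events remain.
event_trials(choice="aspen", csv_file="GeoNet_CMT_solutions.csv",
             sep_km=30., desired_length=30)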
Ejemplo n.º 58
0
 def download_events(self):
     """ Just copy the events into a directory. """
     cat = obspy.read_events(str(self.source_path / "events.xml"))
     catalog_to_directory(cat, self.event_path)
Ejemplo n.º 59
0
def get_N(select_event=0):
	from obspy.clients.fdsn import Client
	import matplotlib.pyplot as plt
	from obspy import read_events, read
	from obspy import read_inventory
	from obspy import Stream, Trace
	from obspy import UTCDateTime
	import os
	client = Client('SCEDC')

	os.environ['PATH'] += os.pathsep + '/usr/local/bin'
	os.chdir('/Users/vidale/Documents/PyCode/LAB/Spare')

#	select_event = 12
	chan_type = 'EHN,HHN,HNN,HLN,BHN' # e.g., BHN
	network_sel = 'CI,CE,NP'  # CE has four traces, but won't deconvolve
	#network_sel = 'CI,NP'
	min_lat = 33.75
	max_lat = 34.2
	min_lon = -118.5
	max_lon = -117.75
	start_buff = 50
	end_buff = 300
	st = Stream()

	if select_event > 15:
		fname_inv = 'LAB.QUAKEML2'
		LAB = read_events(fname_inv, format='QUAKEML')
		if select_event == 16:
			t = LAB[1].origins[0].time
		elif select_event == 17:
			t = LAB[5].origins[0].time
		elif select_event == 18:
			t = LAB[14].origins[0].time
		elif select_event == 19:
			t = LAB[13].origins[0].time
	else:
		fname_inv = 'LAB.QUAKEML'
		LAB = read_events(fname_inv, format='QUAKEML')
		t = LAB[select_event].origins[0].time

	#print('event:',LAB)
	#plt.style.use('ggplot')
	#plt.rcParams['figure.figsize'] = 12, 8
	#LAB.plot(projection = 'local', resolution = 'h')

	#%% Make inventory of all stations in box recording this channel

	print(str(t))
	s_t = t - start_buff
	e_t = t + end_buff

	inventory = client.get_stations(starttime = s_t, endtime = e_t,
						channel=chan_type, level='response', network=network_sel,
						minlatitude  = min_lat, maxlatitude  = max_lat,
						minlongitude = min_lon, maxlongitude = max_lon)

	#print(inventory)
	print('inventory has ' + str(len(inventory)) + ' networks recording data')
	#for network in inventory:
	#	sta_cnt = 0
	#	for station in network:
	#		sta_cnt += sta_cnt
	#	print('Network ' + str(network) + ' has ' + str(sta_cnt) + ' stations to try')
	#inventory.plot(projection = 'local', resolution = 'h')  # not working

	#%% Check inventory of stations for traces at time of event
	cnt_try = 0
	cnt_got = 0
	for network in inventory:
		for station in network:
			if cnt_try % 20 == 0:
				print('Try ' + str(cnt_try) + ' got ' + str(cnt_got) + ' sgrams ' + str(len(st)))
			cnt_try += +1
			try:
				st += client.get_waveforms(network.code, station.code, location='*',channel=chan_type, starttime=s_t, endtime = e_t, attach_response=True)
				cnt_got += 1
			except Exception:
				pass

	print(str(cnt_try) + ' stations examined, ' + str(cnt_got) + ' have data, ' + str(len(st)) + ' traces extracted  ')
	fname = 'event' + str(select_event) + '/event' + str(select_event) + 'N_all.mseed'
	st.write(fname,format = 'MSEED')
	#st=read(fname)

	for tr in st:
		print('Station ' + tr.stats.station + ' channel ' + tr.stats.channel)

	tr = Trace()
	hnn_chosen = 0
	ehn_chosen = 0
	hhn_chosen = 0
	hln_chosen = 0
	bhn_chosen = 0
	for tr in st:
		if tr.stats.channel == 'HNN':
			hnn_chosen += 1
		if tr.stats.channel == 'EHN':
			ehn_chosen += 1
		if tr.stats.channel == 'HHN':
			hhn_chosen += 1
		if tr.stats.channel == 'HLN':
			hln_chosen += 1
		if tr.stats.channel == 'BHN':
			bhn_chosen += 1

	print('Total channels ' + str(len(st)) + ' - HNN, EHN, HHN, HLN, BHN have '
		   + str(hnn_chosen) + ' ' + str(ehn_chosen) + ' '
		   + str(hhn_chosen) + ' ' + str(hln_chosen) + ' ' + str(bhn_chosen))

	for tr in st:
		if (tr.stats.network != 'CE') and (tr.stats.station != 'BVH') and (tr.stats.station != 'LAX'):
#		if tr.stats.network != 'CE':
			tr.remove_response(water_level=40, inventory=inventory, output='ACC')

	fname = 'event' + str(select_event) + '/event' + str(select_event) + 'N_decon.mseed'
	st.write(fname,format = 'MSEED')

	'''
	st=read('event14N_decon.mseed')
	'''

	tr2 = Trace()
	st_chosen = Stream()
	hhn_chosen = 0
	ehn_chosen = 0
	hnn_chosen = 0
	hln_chosen = 0
	bhn_chosen = 0
	for tr in st:
		if tr.stats.channel == 'HNN':
			st_chosen += tr
			hnn_chosen += 1
		elif tr.stats.channel == 'EHN':  # use EHN if present and HNN is not present
			skip = 0
			for tr2 in st:
				if tr2.stats.channel == 'HNN' and tr2.stats.station == tr.stats.station:
					skip = 1
			if skip == 0:
				st_chosen += tr
				ehn_chosen += 1
		elif tr.stats.channel == 'HHN':
			skip = 0
			for tr2 in st:
				if tr2.stats.channel == 'HNN' and tr2.stats.station == tr.stats.station:
					skip = 1
				if tr2.stats.channel == 'EHN' and tr2.stats.station == tr.stats.station:
					skip = 1
			if skip == 0:
				st_chosen += tr
				hhn_chosen += 1
		elif tr.stats.channel == 'HLN':
			skip = 0
			for tr2 in st:
				if tr2.stats.channel == 'HNN' and tr2.stats.station == tr.stats.station:
					skip = 1
				if tr2.stats.channel == 'EHN' and tr2.stats.station == tr.stats.station:
					skip = 1
				if tr2.stats.channel == 'HHN' and tr2.stats.station == tr.stats.station:
					skip = 1
			if skip == 0:
				st_chosen += tr
				hln_chosen += 1
		elif tr.stats.channel == 'BHN':
			skip = 0
			for tr2 in st:
				if tr2.stats.channel == 'HNN' and tr2.stats.station == tr.stats.station:
					skip = 1
				if tr2.stats.channel == 'EHN' and tr2.stats.station == tr.stats.station:
					skip = 1
				if tr2.stats.channel == 'HHN' and tr2.stats.station == tr.stats.station:
					skip = 1
				if tr2.stats.channel == 'HLN' and tr2.stats.station == tr.stats.station:
					skip = 1
			if skip == 0:
				st_chosen += tr
				bhn_chosen += 1

	for tr in st_chosen:
		print(tr.stats.station + '  ' + tr.stats.channel)

	print('Chosen - HNN, EHN, HHN, HLN, BHN have ' + str(hnn_chosen) + ' ' + str(ehn_chosen) +
		   ' ' + str(hhn_chosen) + ' ' + str(hln_chosen) + ' ' + str(bhn_chosen))
	print(str(len(st_chosen)) + ' traces in dataset')
	fname = 'event' + str(select_event) + '/event' + str(select_event) + 'N_chosen.mseed'
	st_chosen.write(fname,format = 'MSEED')
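
A minimal usage sketch, assuming the hard-coded LAB.QUAKEML catalogs and the per-event output directories already exist under the directory the function changes into:

# Sketch: download, deconvolve and select north-component traces for event 16
get_N(select_event=16)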
Ejemplo n.º 60
0
 def test_write_catalog(self):
     """
     Simple testing function for the write_catalog function in
     catalog_to_dd.
     """
     self.assertTrue(os.path.isfile('dt.ct'))
     # Check dt.ct file, should contain only a few linked events
     dt_file_out = open('dt.ct', 'r')
     event_pairs = []
     event_links = []
     event_pair = ''
     for i, line in enumerate(dt_file_out):
         if line[0] == '#':
             if i != 0:
                 # Check the number of links
                 self.assertTrue(len(event_links) >= self.minimum_links)
                 # Check the distance between events
                 event_1_name = [
                     event[1] for event in self.event_list
                     if event[0] == int(event_pair.split()[1])
                 ][0]
                 event_2_name = [
                     event[1] for event in self.event_list
                     if event[0] == int(event_pair.split()[2])
                 ][0]
                 event_1 = readheader(event_1_name)
                 event_2 = readheader(event_2_name)
                 event_1_location = (event_1.origins[0].latitude,
                                     event_1.origins[0].longitude,
                                     event_1.origins[0].depth / 1000)
                 event_2_location = (event_2.origins[0].latitude,
                                     event_2.origins[0].longitude,
                                     event_2.origins[0].depth / 1000)
                 hypocentral_separation = dist_calc(event_1_location,
                                                    event_2_location)
                 self.assertTrue(
                     hypocentral_separation < self.maximum_separation)
                 # Check that the differential times are accurate
                 event_1_picks = read_events(event_1_name)[0].picks
                 event_2_picks = read_events(event_2_name)[0].picks
                 for pick_pair in event_links:
                     station = pick_pair.split()[0]
                     event_1_travel_time_output = pick_pair.split()[1]
                     event_2_travel_time_output = pick_pair.split()[2]
                     # weight = pick_pair.split()[3]
                     phase = pick_pair.split()[4]
                     # Extract the relevant pick information from the
                     # two sfiles
                     for pick in event_1_picks:
                         if pick.waveform_id.station_code == station:
                             if pick.phase_hint[0].upper() == phase:
                                 event_1_pick = pick
                     for pick in event_2_picks:
                         if pick.waveform_id.station_code == station:
                             if pick.phase_hint[0].upper() == phase:
                                 event_2_pick = pick
                     # Calculate the travel-time
                     event_1_travel_time_input = event_1_pick.time -\
                         event_1.origins[0].time
                     event_2_travel_time_input = event_2_pick.time -\
                         event_2.origins[0].time
                     self.assertEqual(event_1_travel_time_input,
                                      float(event_1_travel_time_output))
                     self.assertEqual(event_2_travel_time_input,
                                      float(event_2_travel_time_output))
             event_pair = line
             event_pairs.append(line)
             event_links = []
         else:
             event_links.append(line)
     self.assertTrue(os.path.isfile('phase.dat'))
     dt_file_out.close()