Example #1
 def _set_resource_id(self, value):
     if isinstance(value, dict):
         value = ResourceIdentifier(**value)
     elif type(value) != ResourceIdentifier:
         value = ResourceIdentifier(value)
     value.set_referred_object(self, warn=False)
     self.__dict__['resource_id'] = value
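
A minimal sketch of how this setter is reached in practice, assuming ObsPy's generated Event properties route resource_id assignment through _set_resource_id (the ids below are illustrative):

from obspy.core.event import Event, ResourceIdentifier

ev = Event()
ev.resource_id = "smi:local/example/1"            # str -> ResourceIdentifier(value)
ev.resource_id = {"id": "smi:local/example/2"}    # dict -> ResourceIdentifier(**value)
ev.resource_id = ResourceIdentifier("smi:local/example/3")  # used as-is
assert isinstance(ev.resource_id, ResourceIdentifier)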
Example #2
 def test_latest_in_scope_object_returned(self):
     """
     Test that the most recently defined object with the same resource_id,
     that is still in scope, is returned from the get_referred_object
     method
     """
     cat1 = read_events()
     # The resource_id attached to the first event is self-pointing
     self.assertIs(cat1[0], cat1[0].resource_id.get_referred_object())
     # make a copy and re-read catalog
     cat2 = cat1.copy()
     cat3 = read_events()
     # the resource_id on the new catalogs point to their attached objects
     self.assertIs(cat1[0], cat1[0].resource_id.get_referred_object())
     self.assertIs(cat2[0], cat2[0].resource_id.get_referred_object())
     self.assertIs(cat3[0], cat3[0].resource_id.get_referred_object())
     # now delete cat1 and make sure cat2 and cat3 still work
     del cat1
     self.assertIs(cat2[0], cat2[0].resource_id.get_referred_object())
     self.assertIs(cat3[0], cat3[0].resource_id.get_referred_object())
     # create a resource_id with the same id; the last defined object
     # with the same resource id (that is still in scope) should be returned
     new_id = cat2[0].resource_id.id
     rid = ResourceIdentifier(new_id)
     self.assertIs(rid.get_referred_object(), cat3[0])
     del cat3
     self.assertIs(rid.get_referred_object(), cat2[0])
     del cat2
     self.assertIs(rid.get_referred_object(), None)
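
Condensed outside the test harness, the same scoping rule looks roughly like this; a sketch assuming the ObsPy semantics exercised above (rebinding and fallback may warn, hence the filter):

import warnings
from obspy import UTCDateTime
from obspy.core.event import ResourceIdentifier

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    a, b = UTCDateTime(100), UTCDateTime(200)
    r1 = ResourceIdentifier('smi:local/demo', referred_object=a)  # keep r1/r2 alive
    r2 = ResourceIdentifier('smi:local/demo', referred_object=b)
    rid = ResourceIdentifier('smi:local/demo')
    assert rid.get_referred_object() is b   # newest in-scope object wins
    del b
    assert rid.get_referred_object() is a   # falls back to the older one
    del a
    assert rid.get_referred_object() is None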
Example #3
 def test_same_resource_id_different_referred_object(self):
     """
     Tests the handling of the case that different ResourceIdentifier
     instances are created that have the same resource id but different
     objects. This should not happen and thus a warning should be emitted.
     """
     object_a = UTCDateTime(1000)
     object_b = UTCDateTime(1001)
     self.assertEqual(object_a is object_b, False)
     id = 'obspy.org/tests/test_resource'
     res_a = ResourceIdentifier(id=id,
                                referred_object=object_a)
     # Now create a new resource with the same id but a different object.
     # This will raise a warning.
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('error', UserWarning)
         self.assertRaises(UserWarning, ResourceIdentifier,
                           id=id,
                           referred_object=object_b)
         # Now ignore the warning and actually create the new
         # ResourceIdentifier.
         warnings.simplefilter('ignore', UserWarning)
         res_b = ResourceIdentifier(id=id,
                                    referred_object=object_b)
     # Object b was the last to be added, thus all resource identifiers will
     # now point to it.
     self.assertEqual(object_b is res_a.get_referred_object(), True)
     self.assertEqual(object_b is res_b.get_referred_object(), True)
Example #4
 def test_resource_id_valid_quakemluri(self):
     """
      Test that a resource identifier created by default (i.e. no arguments
      to __init__()) is set up with a QuakeML-conformant ID.
     """
     rid = ResourceIdentifier()
     self.assertEqual(rid.id, rid.get_quakeml_uri())
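
For reference, a default-constructed identifier looks roughly like this (a sketch; the UUID part differs on every run):

from obspy.core.event import ResourceIdentifier

rid = ResourceIdentifier()
print(rid.id)  # e.g. smi:local/<uuid4>
assert rid.id == rid.get_quakeml_uri()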
Example #5
 def test_getting_gc_no_shared_resource_id(self):
     """
     Test that calling get_referred_object on a resource id whose object
     has been garbage collected, and whose resource_id is unique,
     returns None
     """
     obj1 = UTCDateTime()
     rid1 = ResourceIdentifier(referred_object=obj1)
     # delete obj1, make sure rid1 returns None
     del obj1
     self.assertIs(rid1.get_referred_object(), None)
Example #6
 def test_getting_gc_with_shared_resource_id(self):
     """
     Test that calling get_referred_object on a resource id whose object
     has been garbage collected, but that has another object that shares
     the same resource_id, returns the other object with the same resource
     id and issues a warning
     """
     uri = 'testuri'
     obj1 = UTCDateTime(1000)
     obj2 = UTCDateTime(1000)
     rid1 = ResourceIdentifier(uri, referred_object=obj1)
     rid2 = ResourceIdentifier(uri, referred_object=obj2)
     self.assertFalse(rid1.get_referred_object() is
                      rid2.get_referred_object())
     self.assertNotEqual(rid1._object_id, rid2._object_id)
     del obj1
     warnings.simplefilter('default')
     with warnings.catch_warnings(record=True) as w:
         rid1.get_referred_object()
         self.assertEqual(len(w), 1)
         self.assertIn('The object with identity', str(w[0]))
     # now both rids should return the same object
     self.assertIs(rid1.get_referred_object(), rid2.get_referred_object())
     # the object id should now be bound to obj2
     self.assertEqual(rid1._object_id, rid2._object_id)
Example #7
 def test_getting_gc_no_shared_resource_id(self):
     """
     Test that calling get_referred_object on a resource id whose object
     has been garbage collected, and whose resource_id is unique,
     returns None
     """
     obj1 = UTCDateTime()
     rid1 = ResourceIdentifier(referred_object=obj1)
     # delete obj1, make sure rid1 returns None
     del obj1
     # get_referred_object will emit a UserWarning; ignore it
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", UserWarning)
         self.assertIs(rid1.get_referred_object(), None)
Example #8
 def test_quakeml_regex(self):
     """
      Tests that the regex used to check for QuakeML validity actually works.
     """
     # This one contains all valid characters. It should pass the
     # validation.
     res_id = (
         "smi:abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
         "1234567890-.*()_~'/abcdefghijklmnopqrstuvwxyzABCDEFGHIKLMNOPQR"
         "STUVWXYZ0123456789-.*()_~'+?=,;&")
     res = ResourceIdentifier(res_id)
     self.assertEqual(res_id, res.get_quakeml_uri())
     # The id has to be valid from start to end. Due to the spaces this
     # cannot automatically be converted to a correct one.
     res_id = ("something_before smi:local/something  something_after")
     res = ResourceIdentifier(res_id)
     self.assertRaises(ValueError, res.get_quakeml_uri)
     # A colon is an invalid character.
     res_id = ("smi:local/hello:yea")
     res = ResourceIdentifier(res_id)
     self.assertRaises(ValueError, res.get_quakeml_uri)
     # A space is invalid as well
     res_id = ("smi:local/hello yea")
     res = ResourceIdentifier(res_id)
     self.assertRaises(ValueError, res.get_quakeml_uri)
     # Dots are fine
     res_id = ("smi:local/hello....yea")
     res = ResourceIdentifier(res_id)
     self.assertEqual(res_id, res.get_quakeml_uri())
     # Carets are not
     res_id = ("smi:local/hello^^yea")
     res = ResourceIdentifier(res_id)
     self.assertRaises(ValueError, res.get_quakeml_uri)
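
The checks above suggest a small helper for user code that must only emit serializable ids; this is a hedged sketch, not an ObsPy API:

from obspy.core.event import ResourceIdentifier

def to_quakeml_uri(id_string):
    """Return a QuakeML-valid URI for id_string, or None if it cannot be made valid."""
    try:
        return ResourceIdentifier(id_string).get_quakeml_uri()
    except ValueError:
        return None

assert to_quakeml_uri("smi:local/hello....yea") is not None
assert to_quakeml_uri("smi:local/hello:yea") is None  # colon in the final part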
Example #9
    def test_resource_ids_refer_to_newest_object(self):
        """
        Tests that resource ids which are assigned multiple times but point to
        identical objects always point to the newest object. This prevents some
        odd behaviour.
        """
        t1 = UTCDateTime(2010, 1, 1)
        t2 = UTCDateTime(2010, 1, 1)

        rid = ResourceIdentifier("a", referred_object=t1)
        rid = ResourceIdentifier("a", referred_object=t2)

        del t1

        self.assertEqual(rid.get_referred_object(), t2)
Example #10
 def test_resources_in_global_dict_get_garbage_collected(self):
     """
     Tests that the ResourceIdentifiers in the class level resource dict get
     deleted if they have no other reference and the object they refer to
     goes out of scope.
     """
     obj_a = UTCDateTime()
     obj_b = UTCDateTime()
     res1 = ResourceIdentifier(referred_object=obj_a)
     res2 = ResourceIdentifier(referred_object=obj_b)
     # Now two keys should be in the global dict.
     rdict = ResourceIdentifier._ResourceIdentifier__resource_id_weak_dict
     self.assertEqual(len(list(rdict.keys())), 2)
     del obj_a, obj_b
     self.assertIs(res1.get_referred_object(), None)
     self.assertIs(res2.get_referred_object(), None)
Example #11
 def test_error_message_for_failing_quakeml_id_conversion(self):
     """
     Converting an id to a QuakeML compatible id might fail. Test the
     error message.
     """
     invalid_id = "http://example.org"
     rid = ResourceIdentifier(invalid_id)
     with self.assertRaises(ValueError) as e:
         rid.get_quakeml_uri()
     self.assertEqual(
         e.exception.args[0],
         "The id 'http://example.org' is not a valid QuakeML resource "
         "identifier. ObsPy tried modifying it to "
         "'smi:local/http://example.org' but it is still not valid. Please "
         "make sure all resource ids are either valid or can be made valid "
         "by prefixing them with 'smi:<authority_id>/'. Valid ids are "
         "specified in the QuakeML manual section 3.1 and in particular "
         "exclude colons for the final part.")
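
As the message suggests, an id whose final part is colon-free can usually be repaired by prefixing; a quick sketch of both outcomes:

from obspy.core.event import ResourceIdentifier

rid = ResourceIdentifier("my_event_1234")
print(rid.get_quakeml_uri())  # e.g. 'smi:local/my_event_1234'

rid = ResourceIdentifier("http://example.org")
# rid.get_quakeml_uri()  # raises ValueError: colons remain in the final part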
Example #12
 def test_resources_in_global_dict_get_garbage_collected(self):
     """
     Tests that the ResourceIdentifiers in the class level resource dict get
     deleted if they have no other reference and the object they refer to
     goes out of scope.
     """
     obj_a = UTCDateTime()
     obj_b = UTCDateTime()
     res1 = ResourceIdentifier(referred_object=obj_a)
     res2 = ResourceIdentifier(referred_object=obj_b)
     # Now two keys should be in the global dict.
     rdict = ResourceIdentifier._ResourceIdentifier__resource_id_weak_dict
     self.assertEqual(len(list(rdict.keys())), 2)
     # Deleting the objects should also remove them from the dictionary.
     del obj_a, obj_b
     self.assertEqual(len(list(rdict.keys())), 0)
     # The references are still around but no longer have associated objects.
     self.assertEqual(res1.get_referred_object(), None)
     self.assertEqual(res2.get_referred_object(), None)
Example #13
    def test_id_without_reference_not_in_global_list(self):
        """
        This tests some internal workings of the ResourceIdentifier class.
        NEVER modify the __resource_id_weak_dict!

        Only those ResourceIdentifiers that have a reference to an object that
        is referred to somewhere else should stay in the dictionary.
        """
        r_dict = ResourceIdentifier._ResourceIdentifier__resource_id_weak_dict
        _r1 = ResourceIdentifier()  # NOQA
        self.assertEqual(len(list(r_dict.keys())), 0)
        # Adding a ResourceIdentifier with an object that does not have a
        # reference will result in a dict that contains None, but that will
        # get removed when the resource_id goes out of scope
        _r2 = ResourceIdentifier(referred_object=UTCDateTime())  # NOQA
        self.assertEqual(_r2.get_referred_object(), None)
        del _r2  # delete rid to get its id out of r_dict keys
        # Give it a reference and it will stick around.
        obj = UTCDateTime()
        _r3 = ResourceIdentifier(referred_object=obj)  # NOQA
        self.assertEqual(len(list(r_dict.keys())), 1)
Example #14
    def test_automatic_dereferring_if_resource_id_goes_out_of_scope(self):
        """
        Tests that objects that have no more referrer are no longer stored in
        the reference dictionary.
        """
        t1 = UTCDateTime(2010, 1, 1)  # test object
        r_dict = ResourceIdentifier._ResourceIdentifier__resource_id_weak_dict
        rid = 'a'  # test resource id

        # Create object and assert the reference has been created.
        r1 = ResourceIdentifier(rid, referred_object=t1)
        self.assertEqual(r1.get_referred_object(), t1)
        self.assertTrue(rid in r_dict)
        # Deleting the object should remove the reference.
        del r1
        self.assertFalse(rid in r_dict)
        # Now create two equal references.
        r1 = ResourceIdentifier(rid, referred_object=t1)
        r2 = ResourceIdentifier(rid, referred_object=t1)
        self.assertEqual(r1.get_referred_object(), t1)
        # Deleting one should not remove the reference.
        del r1
        self.assertEqual(r2.get_referred_object(), t1)
        self.assertTrue(rid in r_dict)
        # Deleting the second one should remove it as well.
        del r2
        self.assertFalse(rid in r_dict)
Example #15
 def test_same_resource_id_different_referred_object(self):
     """
     Tests the handling of the case that different ResourceIdentifier
     instances are created that have the same resource id but different
     objects. The referred objects should still return the same objects
     used in the ResourceIdentifier construction or set_referred_object
     call. However, if an object is set to a resource_id that is not
     equal to the last object set it should issue a warning.
     """
     warnings.simplefilter('default')
     object_a = UTCDateTime(1000)
     object_b = UTCDateTime(1000)
     object_c = UTCDateTime(1001)
     self.assertFalse(object_a is object_b)
     id = 'obspy.org/tests/test_resource'
     res_a = ResourceIdentifier(id=id, referred_object=object_a)
     # Now create a new resource with the same id but a different object.
     # This should not raise a warning as the object a and b are equal.
     with warnings.catch_warnings(record=True) as w:
         res_b = ResourceIdentifier(id=id, referred_object=object_b)
         self.assertEqual(len(w), 0)
     # if the set object is not equal to the last object set to the same
     # resource_id, however, a warning should be issued.
     with warnings.catch_warnings(record=True) as w:
         res_c = ResourceIdentifier(id=id, referred_object=object_c)
         self.assertEqual(len(w), 1)
         expected_text = 'which is not equal to the last object bound'
         self.assertIn(expected_text, str(w[0]))
     # even though the resource_id are the same, the referred objects
     # should point to the original (different) objects
     self.assertIs(object_a, res_a.get_referred_object())
     self.assertIs(object_b, res_b.get_referred_object())
     self.assertIs(object_c, res_c.get_referred_object())
Example #16
 def test_adding_a_referred_object_after_creation(self):
     """
     Check that the referred objects can also be made available after the
     ResourceIdentifier instances have been created.
     """
     obj = UTCDateTime()
     obj_id = id(obj)
     res_id = "obspy.org/time/test"
     ref_a = ResourceIdentifier(res_id)
     ref_b = ResourceIdentifier(res_id)
     ref_c = ResourceIdentifier(res_id)
     # All three will have no resource attached.
     self.assertEqual(ref_a.get_referred_object(), None)
     self.assertEqual(ref_b.get_referred_object(), None)
     self.assertEqual(ref_c.get_referred_object(), None)
     # Setting the object for one will make it available to all other
     # instances.
     ref_b.set_referred_object(obj)
     self.assertEqual(id(ref_a.get_referred_object()), obj_id)
     self.assertEqual(id(ref_b.get_referred_object()), obj_id)
     self.assertEqual(id(ref_c.get_referred_object()), obj_id)
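
Outside the test suite the same late-binding pattern applies whenever the object becomes available after its identifiers do; a sketch assuming the API above:

from obspy import UTCDateTime
from obspy.core.event import ResourceIdentifier

ref = ResourceIdentifier("smi:local/shared/pick1")
assert ref.get_referred_object() is None  # nothing bound yet
obj = UTCDateTime()
ref.set_referred_object(obj)
# Any identifier with the same id now resolves to obj:
assert ResourceIdentifier("smi:local/shared/pick1").get_referred_object() is obj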
Example #17
def _channel_loop(detection, template, min_cc, interpolate=False, i=0,
                  debug=0):
    """
    Inner loop for correlating and assigning picks.

    Utility function to take a stream of data for the detected event and write
    maximum correlation to absolute time as picks in an obspy.core.event.Event
    object.
    Only outputs picks whose correlation exceeds min_cc.

    :type detection: obspy.core.stream.Stream
    :param detection: Stream of data for the slave event detected using \
        template.
    :type template: obspy.core.stream.Stream
    :param template: Stream of data as the template for the detection.
    :type min_cc: float
    :param min_cc: Minimum cross-correlation value for a pick to be kept.
    :type interpolate: bool
    :param interpolate: Interpolate the correlation function to achieve \
        sub-sample precision.
    :type i: int
    :param i: Used to track which process has occurred when running in \
        parallel.

    :returns: Event object containing net, sta, chan information
    :rtype: obspy.core.event.Event
    """
    from obspy.core.event import Event, Pick, WaveformStreamID
    from obspy.core.event import ResourceIdentifier
    import numpy as np  # used for np.amax/np.argmax below
    event = Event()
    s_stachans = {}
    used_s_sta = []
    for tr in template:
        temp_net = tr.stats.network
        temp_sta = tr.stats.station
        temp_chan = tr.stats.channel
        image = detection.select(station=temp_sta,
                                 channel=temp_chan)
        if image:
            ccc = normxcorr2(tr.data, image[0].data)
            # Convert the maximum cross-correlation time to an actual time
            if debug > 3:
                print('********DEBUG: Maximum cross-corr=%s' % np.amax(ccc))
            if np.amax(ccc) > min_cc:
                if interpolate:
                    try:
                        interp_max = _xcorr_interp(ccc=ccc,
                                                   dt=image[0].stats.delta)
                    except IndexError:
                        print('Could not interpolate ccc, not smooth')
                        interp_max = np.argmax(ccc) * image[0].stats.delta
                    picktime = image[0].stats.starttime + interp_max
                else:
                    picktime = image[0].stats.starttime + (np.argmax(ccc) *
                                                           image[0].stats.delta)
            else:
                continue
            # Perhaps weight each pick by the cc val or cc val^2?
            # weight = np.amax(ccc) ** 2
            if temp_chan[-1:] == 'Z':
                phase = 'P'
            # Only take the S-pick with the best correlation
            elif temp_chan[-1:] in ['E', 'N']:
                phase = 'S'
                if temp_sta not in s_stachans and np.amax(ccc) > min_cc:
                    s_stachans[temp_sta] = ((temp_chan, np.amax(ccc),
                                             picktime))
                elif temp_sta in s_stachans and np.amax(ccc) > min_cc:
                    if np.amax(ccc) > s_stachans[temp_sta][1]:
                        # keep the new pick time (better correlation)
                        pass
                    else:
                        picktime = s_stachans[temp_sta][2]
                        temp_chan = s_stachans[temp_sta][0]
                elif np.amax(ccc) < min_cc and temp_sta not in used_s_sta:
                    used_s_sta.append(temp_sta)
                else:
                    continue
            else:
                phase = None
            _waveform_id = WaveformStreamID(network_code=temp_net,
                                            station_code=temp_sta,
                                            channel_code=temp_chan)
            event.picks.append(Pick(waveform_id=_waveform_id,
                                    time=picktime,
                                    method_id=ResourceIdentifier('EQcorrscan'),
                                    phase_hint=phase))
    return (i, event)
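
_channel_loop is an internal EQcorrscan helper, so a direct call is hypothetical; a sketch where detection_st and template_st are placeholder Streams covering the same stations and channels:

from obspy import read

detection_st = read("detection.ms")  # placeholder file names
template_st = read("template.ms")
index, event = _channel_loop(detection_st, template_st, min_cc=0.5,
                             interpolate=True, i=0)
print(len(event.picks), "picks above min_cc")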
Example #18
 def _comment(self, text):
     comment = Comment()
     comment.text = text
     comment.resource_id = ResourceIdentifier(prefix=self.res_id_prefix)
     return comment
Example #19
    def _calculate_event(self,
                         template=None,
                         template_st=None,
                         estimate_origin=True,
                         correct_prepick=True):
        """
        Calculate an event for this detection using a given template.

        :type template: Template
        :param template: The template that made this detection
        :type template_st: `obspy.core.stream.Stream`
        :param template_st:
            Template stream, used to calculate pick times, not needed if
            template is given.
        :type estimate_origin: bool
        :param estimate_origin:
            Whether to include an estimate of the origin based on the template
            origin.
        :type correct_prepick: bool
        :param correct_prepick:
            Whether to apply the prepick correction defined in the template.
            Only applicable if template is not None

        .. rubric:: Note
            Works in place on Detection - over-writes previous events.
            Corrects for prepick if template given.
        """
        if template is not None and template.name != self.template_name:
            Logger.info("Template names do not match: {0}: {1}".format(
                template.name, self.template_name))
            return
        # The detect time must form a valid QuakeML URI within the
        # resource_id; this writes a formatted string that is still
        # readable by UTCDateTime.
        det_time = str(self.detect_time.strftime('%Y%m%dT%H%M%S.%f'))
        ev = Event(resource_id=ResourceIdentifier(
            id=self.template_name + '_' + det_time, prefix='smi:local'))
        ev.creation_info = CreationInfo(author='EQcorrscan',
                                        creation_time=UTCDateTime())
        ev.comments.append(
            Comment(text="Template: {0}".format(self.template_name)))
        ev.comments.append(
            Comment(text='threshold={0}'.format(self.threshold)))
        ev.comments.append(
            Comment(text='detect_val={0}'.format(self.detect_val)))
        if self.chans is not None:
            ev.comments.append(
                Comment(text='channels used: {0}'.format(' '.join(
                    [str(pair) for pair in self.chans]))))
        if template is not None:
            template_st = template.st
            if correct_prepick:
                template_prepick = template.prepick
            else:
                template_prepick = 0
            template_picks = template.event.picks
        else:
            template_prepick = 0
            template_picks = []
        min_template_tm = min([tr.stats.starttime for tr in template_st])
        for tr in template_st:
            if (tr.stats.station, tr.stats.channel) \
                    not in self.chans:
                continue
            elif "not_in_original" in tr.stats:
                continue
            elif np.all(np.isnan(tr.data)):
                continue  # The channel contains no data and was not used.
            else:
                pick_time = self.detect_time + (tr.stats.starttime -
                                                min_template_tm)
                pick_time += template_prepick
                new_pick = Pick(time=pick_time,
                                waveform_id=WaveformStreamID(
                                    network_code=tr.stats.network,
                                    station_code=tr.stats.station,
                                    channel_code=tr.stats.channel,
                                    location_code=tr.stats.location))
                template_pick = [
                    p for p in template_picks
                    if p.waveform_id.get_seed_string() ==
                    new_pick.waveform_id.get_seed_string()
                ]
                if len(template_pick) == 0:
                    new_pick.phase_hint = None
                elif len(template_pick) == 1:
                    new_pick.phase_hint = template_pick[0].phase_hint
                else:
                    # Multiple picks for this trace in template
                    similar_traces = template_st.select(id=tr.id)
                    similar_traces.sort()
                    _index = similar_traces.traces.index(tr)
                    try:
                        new_pick.phase_hint = sorted(
                            template_pick,
                            key=lambda p: p.time)[_index].phase_hint
                    except IndexError:
                        Logger.error("No pick for trace")
                ev.picks.append(new_pick)
        if estimate_origin and template is not None:
            try:
                template_origin = (template.event.preferred_origin()
                                   or template.event.origins[0])
            except IndexError:
                template_origin = None
            if template_origin:
                for pick in ev.picks:
                    comparison_pick = [
                        p for p in template.event.picks
                        if p.waveform_id.get_seed_string() ==
                        pick.waveform_id.get_seed_string()
                    ]
                    comparison_pick = [
                        p for p in comparison_pick
                        if p.phase_hint == pick.phase_hint
                    ]
                    if len(comparison_pick) > 0:
                        break
                else:
                    Logger.error("Could not compute relative origin: no picks")
                    self.event = ev
                    return
                origin_time = pick.time - (comparison_pick[0].time -
                                           template_origin.time)
                # Calculate based on difference between pick and origin?
                _origin = Origin(
                    ResourceIdentifier(id="EQcorrscan/{0}_{1}".format(
                        self.template_name, det_time),
                                       prefix="smi:local"),
                    time=origin_time,
                    evaluation_mode="automatic",
                    evaluation_status="preliminary",
                    creation_info=CreationInfo(author='EQcorrscan',
                                               creation_time=UTCDateTime()),
                    comments=[Comment(
                        text="Origin automatically assigned based on template"
                             " origin: use with caution.")],
                    latitude=template_origin.latitude,
                    longitude=template_origin.longitude,
                    depth=template_origin.depth,
                    time_errors=template_origin.time_errors,
                    latitude_errors=template_origin.latitude_errors,
                    longitude_errors=template_origin.longitude_errors,
                    depth_errors=template_origin.depth_errors,
                    depth_type=template_origin.depth_type,
                    time_fixed=False,
                    epicenter_fixed=template_origin.epicenter_fixed,
                    reference_system_id=template_origin.reference_system_id,
                    method_id=template_origin.method_id,
                    earth_model_id=template_origin.earth_model_id,
                    origin_type=template_origin.origin_type,
                    origin_uncertainty=template_origin.origin_uncertainty,
                    region=template_origin.region)
                ev.origins = [_origin]
        self.event = ev
        return self
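
Hypothetical usage, assuming det is an EQcorrscan Detection and tmpl the Template that produced it (both names are placeholders):

det._calculate_event(template=tmpl, estimate_origin=True, correct_prepick=True)
print(det.event.resource_id, len(det.event.picks))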
Example #20
    def test_creating_minimal_QuakeML_with_MT(self):
        """
        Tests the creation of a minimal QuakeML containing origin, magnitude
        and moment tensor.
        """
        # Rotate into physical domain
        lat, lon, depth, org_time = 10.0, -20.0, 12000, UTCDateTime(2012, 1, 1)
        mrr, mtt, mpp, mtr, mpr, mtp = 1E18, 2E18, 3E18, 3E18, 2E18, 1E18
        scalar_moment = math.sqrt(mrr**2 + mtt**2 + mpp**2 + mtr**2 + mpr**2 +
                                  mtp**2)
        moment_magnitude = 0.667 * (math.log10(scalar_moment) - 9.1)

        # Initialise event
        ev = Event(event_type="earthquake")

        ev_origin = Origin(time=org_time,
                           latitude=lat,
                           longitude=lon,
                           depth=depth,
                           resource_id=ResourceIdentifier())
        ev.origins.append(ev_origin)

        # populate event moment tensor
        ev_tensor = Tensor(m_rr=mrr,
                           m_tt=mtt,
                           m_pp=mpp,
                           m_rt=mtr,
                           m_rp=mpr,
                           m_tp=mtp)

        ev_momenttensor = MomentTensor(tensor=ev_tensor)
        ev_momenttensor.scalar_moment = scalar_moment
        ev_momenttensor.derived_origin_id = ev_origin.resource_id

        ev_focalmechanism = FocalMechanism(moment_tensor=ev_momenttensor)
        ev.focal_mechanisms.append(ev_focalmechanism)

        # populate event magnitude
        ev_magnitude = Magnitude()
        ev_magnitude.mag = moment_magnitude
        ev_magnitude.magnitude_type = 'Mw'
        ev_magnitude.evaluation_mode = 'automatic'
        ev.magnitudes.append(ev_magnitude)

        # write QuakeML file
        cat = Catalog(events=[ev])
        memfile = StringIO.StringIO()
        cat.write(memfile, format="quakeml", validate=True)

        memfile.seek(0, 0)
        new_cat = readQuakeML(memfile)
        self.assertEqual(len(new_cat), 1)
        event = new_cat[0]
        self.assertEqual(len(event.origins), 1)
        self.assertEqual(len(event.magnitudes), 1)
        self.assertEqual(len(event.focal_mechanisms), 1)
        org = event.origins[0]
        mag = event.magnitudes[0]
        fm = event.focal_mechanisms[0]
        self.assertEqual(org.latitude, lat)
        self.assertEqual(org.longitude, lon)
        self.assertEqual(org.depth, depth)
        self.assertEqual(org.time, org_time)
        # Moment tensor.
        mt = fm.moment_tensor.tensor
        self.assertTrue((fm.moment_tensor.scalar_moment - scalar_moment) /
                        scalar_moment < scalar_moment * 1E-10)
        self.assertEqual(mt.m_rr, mrr)
        self.assertEqual(mt.m_pp, mpp)
        self.assertEqual(mt.m_tt, mtt)
        self.assertEqual(mt.m_rt, mtr)
        self.assertEqual(mt.m_rp, mpr)
        self.assertEqual(mt.m_tp, mtp)
        # Mag
        self.assertAlmostEqual(mag.mag, moment_magnitude)
        self.assertEqual(mag.magnitude_type, "Mw")
        self.assertEqual(mag.evaluation_mode, "automatic")
Example #21
 def test_origin(self):
     """
     Tests Origin object.
     """
     filename = os.path.join(self.path, 'quakeml_1.2_origin.xml')
     catalog = readQuakeML(filename)
     self.assertEqual(len(catalog), 1)
     self.assertEqual(len(catalog[0].origins), 1)
     origin = catalog[0].origins[0]
     self.assertEqual(
         origin.resource_id,
         ResourceIdentifier(
             'smi:www.iris.edu/ws/event/query?originId=7680412'))
     self.assertEqual(origin.time, UTCDateTime("2011-03-11T05:46:24.1200"))
     self.assertEqual(origin.latitude, 38.297)
     self.assertEqual(origin.latitude_errors.lower_uncertainty, None)
     self.assertEqual(origin.longitude, 142.373)
     self.assertEqual(origin.longitude_errors.uncertainty, None)
     self.assertEqual(origin.depth, 29.0)
     self.assertEqual(origin.depth_errors.confidence_level, 50.0)
     self.assertEqual(origin.depth_type, "from location")
     self.assertEqual(origin.method_id,
                      ResourceIdentifier(resource_id="smi:some/method/NA"))
     self.assertEqual(origin.time_fixed, None)
     self.assertEqual(origin.epicenter_fixed, False)
     self.assertEqual(
         origin.reference_system_id,
         ResourceIdentifier(resource_id="smi:some/reference/muh"))
     self.assertEqual(origin.earth_model_id,
                      ResourceIdentifier(resource_id="smi:same/model/maeh"))
     self.assertEqual(origin.evaluation_mode, "manual")
     self.assertEqual(origin.evaluation_status, "preliminary")
     self.assertEqual(origin.origin_type, "hypocenter")
     # composite times
     self.assertEqual(len(origin.composite_times), 2)
     c = origin.composite_times
     self.assertEqual(c[0].year, 2029)
     self.assertEqual(c[0].month, None)
     self.assertEqual(c[0].day, None)
     self.assertEqual(c[0].hour, 12)
     self.assertEqual(c[0].minute, None)
     self.assertEqual(c[0].second, None)
     self.assertEqual(c[1].year, None)
     self.assertEqual(c[1].month, None)
     self.assertEqual(c[1].day, None)
     self.assertEqual(c[1].hour, 1)
     self.assertEqual(c[1].minute, None)
     self.assertEqual(c[1].second, 29.124234)
     # quality
     self.assertEqual(origin.quality.used_station_count, 16)
     self.assertEqual(origin.quality.standard_error, 0)
     self.assertEqual(origin.quality.azimuthal_gap, 231)
     self.assertEqual(origin.quality.maximum_distance, 53.03)
     self.assertEqual(origin.quality.minimum_distance, 2.45)
     self.assertEqual(origin.quality.associated_phase_count, None)
     self.assertEqual(origin.quality.associated_station_count, None)
     self.assertEqual(origin.quality.depth_phase_count, None)
     self.assertEqual(origin.quality.secondary_azimuthal_gap, None)
     self.assertEqual(origin.quality.ground_truth_level, None)
     self.assertEqual(origin.quality.median_distance, None)
     # comments
     self.assertEqual(len(origin.comments), 2)
     c = origin.comments
     self.assertEqual(c[0].text, 'Some comment')
     self.assertEqual(
         c[0].resource_id,
         ResourceIdentifier(resource_id="smi:some/comment/reference"))
     self.assertEqual(c[0].creation_info.author, 'EMSC')
     self.assertEqual(c[1].resource_id, None)
     self.assertEqual(c[1].creation_info, None)
     self.assertEqual(c[1].text, 'Another comment')
     # creation info
     self.assertEqual(origin.creation_info.author, "NEIC")
     self.assertEqual(origin.creation_info.agency_id, None)
     self.assertEqual(origin.creation_info.author_uri, None)
     self.assertEqual(origin.creation_info.agency_uri, None)
     self.assertEqual(origin.creation_info.creation_time, None)
     self.assertEqual(origin.creation_info.version, None)
     # origin uncertainty
     u = origin.origin_uncertainty
     self.assertEqual(u.preferred_description, "uncertainty ellipse")
     self.assertEqual(u.horizontal_uncertainty, 9000)
     self.assertEqual(u.min_horizontal_uncertainty, 6000)
     self.assertEqual(u.max_horizontal_uncertainty, 10000)
     self.assertEqual(u.azimuth_max_horizontal_uncertainty, 80.0)
     # confidence ellipsoid
     c = u.confidence_ellipsoid
     self.assertEqual(c.semi_intermediate_axis_length, 2.123)
     self.assertEqual(c.major_axis_rotation, 5.123)
     self.assertEqual(c.major_axis_plunge, 3.123)
     self.assertEqual(c.semi_minor_axis_length, 1.123)
     self.assertEqual(c.semi_major_axis_length, 0.123)
     self.assertEqual(c.major_axis_azimuth, 4.123)
     # exporting back to XML should result in the same document
     original = open(filename, "rt").read()
     processed = Pickler().dumps(catalog)
     self._compareStrings(original, processed)
Example #22
 def cat1_bad_arrival_pick_id(self, cat1):
     """Create a catalog with a bad arrival (no id)"""
     cat = cat1.copy()
     rid = ResourceIdentifier()
     cat[0].origins[0].arrivals[0].pick_id = rid
     return cat
Example #23
# Extract ids and streams for the large groups (more than 7 events)
big_group_ids = []
big_group_streams = []
for group in groups:
    if len(group) > 7:
        big_group_ids.append([ev[1] for ev in group])
        big_group_streams.append([ev[0] for ev in group])
for i, group_ids in enumerate(big_group_ids):
    file_names = ('/home/chet/data/mrp_data/catalogs/2015/final/thresh_' +
                  str(corr_thresh) + '_group_' + str(i))
    temp_cat = Catalog()
    with open(file_names + '.csv', 'w', newline='') as f:
        csvwriter = csv.writer(f, delimiter=',')
        for event in cat:
            ev_name = str(event.resource_id).split('/')[-1:][0]
            if ev_name in group_ids:
                x = str(event.preferred_origin().longitude)
                y = str(event.preferred_origin().latitude)
                z = str(event.preferred_origin().depth)
                csvwriter.writerow([x, y, z])
                temp_cat.append(event)
    temp_cat.write(file_names + '.shp', format="SHAPEFILE")

# Below we'll plot picks over templates for given indices
ev_id = '2015sora495962'
res_id = ResourceIdentifier('smi:org.gfz-potsdam.de/geofon/2015sora495962')
for event in cat:
    if event.resource_id == res_id:
        test_ev = event
for i, group_id in enumerate(big_group_ids):
    if ev_id in group_id:
        pretty_template_plot(big_group_streams[i], picks=test_ev.picks)
Example #24
 def _construct_id(self, parts, add_hash=False):
     id_ = '/'.join([str(self.cat.resource_id)] + list(parts))
     if add_hash and not self._no_uuid_hashes:
         id_ = str(ResourceIdentifier(prefix=id_))
     return id_
Example #25
    def relative_magnitudes(self,
                            stream,
                            pre_processed,
                            process_cores=1,
                            ignore_bad_data=False,
                            parallel=False,
                            min_cc=0.4,
                            **kwargs):
        """
        Compute relative magnitudes for the detections.

        Works in place on events in the Family

        :type stream: obspy.core.stream.Stream
        :param stream:
            All the data needed to cut from - can be a gappy Stream.
        :type pre_processed: bool
        :param pre_processed:
            Whether the stream has been pre-processed or not to match the
            templates. See note below.
        :param parallel: Turn parallel processing on or off.
        :type process_cores: int
        :param process_cores:
            Number of processes to use for pre-processing (if different to
            `cores`).
        :type ignore_bad_data: bool
        :param ignore_bad_data:
            If False (default), errors will be raised if data are excessively
            gappy or are mostly zeros. If True then no error will be raised,
            but an empty trace will be returned (and not used in detection).
        :type min_cc: float
        :param min_cc: Minimum correlation for magnitude to be computed.
        :param kwargs:
            Keyword arguments passed to `utils.mag_calc.relative_mags`

        .. Note::
            Note on pre-processing: You can provide a pre-processed stream,
            which may be beneficial for detections over large time periods
            (the stream can have gaps, which reduces memory usage).  However,
            in this case the processing steps are not checked, so you must
            ensure that the template in the Family has the same sampling
            rate and filtering as the stream.
            If pre-processing has not been done then the data will be processed
            according to the parameters in the template.
        """
        processed_stream = self._process_streams(
            stream=stream,
            pre_processed=pre_processed,
            process_cores=process_cores,
            parallel=parallel,
            ignore_bad_data=ignore_bad_data)
        for detection in self.detections:
            event = detection.event
            if event is None:
                continue
            corr_dict = {
                p.waveform_id.get_seed_string():
                float(p.comments[0].text.split("=")[-1])
                for p in event.picks
            }
            template = self.template
            try:
                t_mag = (template.event.preferred_magnitude()
                         or template.event.magnitudes[0])
            except IndexError:
                Logger.info("No template magnitude, relative magnitudes cannot"
                            " be computed for {0}".format(event.resource_id))
                continue
            # Set the signal-window to be the template length
            signal_window = (
                -template.prepick,
                min([tr.stats.npts * tr.stats.delta
                     for tr in template.st]) - template.prepick)
            delta_mag = relative_magnitude(st1=template.st,
                                           st2=processed_stream,
                                           event1=template.event,
                                           event2=event,
                                           correlations=corr_dict,
                                           min_cc=min_cc,
                                           signal_window=signal_window,
                                           **kwargs)
            # Add station magnitudes
            sta_contrib = []
            av_mag = 0.0
            for seed_id, _delta_mag in delta_mag.items():
                sta_mag = StationMagnitude(
                    mag=t_mag.mag + _delta_mag,
                    magnitude_type=t_mag.magnitude_type,
                    method_id=ResourceIdentifier("relative"),
                    waveform_id=WaveformStreamID(seed_string=seed_id),
                    creation_info=CreationInfo(author="EQcorrscan",
                                               creation_time=UTCDateTime()))
                event.station_magnitudes.append(sta_mag)
                sta_contrib.append(
                    StationMagnitudeContribution(
                        station_magnitude_id=sta_mag.resource_id, weight=1.))
                av_mag += sta_mag.mag
            if len(delta_mag) > 0:
                av_mag /= len(delta_mag)
                # Compute average magnitude
                event.magnitudes.append(
                    Magnitude(mag=av_mag,
                              magnitude_type=t_mag.magnitude_type,
                              method_id=ResourceIdentifier("relative"),
                              station_count=len(delta_mag),
                              evaluation_mode="manual",
                              station_magnitude_contributions=sta_contrib,
                              creation_info=CreationInfo(
                                  author="EQcorrscan",
                                  creation_time=UTCDateTime())))
        return self.catalog
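
Hypothetical usage on a Family fam, with st a raw Stream spanning the detection times (both names are placeholders):

catalog = fam.relative_magnitudes(stream=st, pre_processed=False,
                                  parallel=True, process_cores=4, min_cc=0.5)
for ev in catalog:
    if ev.magnitudes:
        print(ev.resource_id, ev.magnitudes[-1].mag)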
Example #26
def autoPyLoT(input_dict=None,
              parameter=None,
              inputfile=None,
              fnames=None,
              eventid=None,
              savepath=None,
              savexml=True,
              station='all',
              iplot=0,
              ncores=0):
    """
    Determine phase onsets automatically utilizing the automatic picking
    algorithms by Kueperkoch et al. 2010/2012.

    :param inputfile: path to the input file containing all parameter
    information for automatic picking (for formatting details, see.
    `~pylot.core.io.inputs.PylotParameter`
    :type inputfile: str
    :return:

    .. rubric:: Example

    """

    if ncores == 1:
        sp_info = 'autoPyLoT is running serially on 1 core.'
    else:
        if ncores == 0:
            ncores_readable = 'all available'
        else:
            ncores_readable = ncores
        sp_info = 'autoPyLoT is running in parallel on {} cores.'.format(
            ncores_readable)

    splash = '''************************************\n
                *********autoPyLoT starting*********\n
                The Python picking and Location Tool\n
                Version {version} 2017\n
                \n
                Authors:\n
                L. Kueperkoch (BESTEC GmbH, Landau i. d. Pfalz)\n
                M. Paffrath (Ruhr-Universitaet Bochum)\n
                S. Wehling-Benatelli (Ruhr-Universitaet Bochum)\n
                
                {sp}
                ***********************************'''.format(
        version=_getVersionString(), sp=sp_info)
    print(splash)

    parameter = real_None(parameter)
    inputfile = real_None(inputfile)
    eventid = real_None(eventid)

    fig_dict = None
    fig_dict_wadatijack = None

    locflag = 1
    if input_dict and isinstance(input_dict, dict):
        if 'parameter' in input_dict:
            parameter = input_dict['parameter']
        if 'fig_dict' in input_dict:
            fig_dict = input_dict['fig_dict']
        if 'fig_dict_wadatijack' in input_dict:
            fig_dict_wadatijack = input_dict['fig_dict_wadatijack']
        if 'station' in input_dict:
            station = input_dict['station']
        if 'fnames' in input_dict:
            fnames = input_dict['fnames']
        if 'eventid' in input_dict:
            eventid = input_dict['eventid']
        if 'iplot' in input_dict:
            iplot = input_dict['iplot']
        if 'locflag' in input_dict:
            locflag = input_dict['locflag']
        if 'savexml' in input_dict:
            savexml = input_dict['savexml']

    if not parameter:
        if inputfile:
            parameter = PylotParameter(inputfile)
            #iplot = parameter['iplot']
        else:
            infile = os.path.join(os.path.expanduser('~'), '.pylot',
                                  'pylot.in')
            print('Using default input file {}'.format(infile))
            parameter = PylotParameter(infile)
    else:
        if not isinstance(parameter, PylotParameter):
            print('Wrong input type for parameter: {}'.format(type(parameter)))
            return
        if inputfile:
            print(
                'Parameters set and input file given. Choose either of both.')
            return

    evt = None

    # reading parameter file
    if parameter.hasParam('datastructure'):
        # getting information on data structure
        datastructure = DATASTRUCTURE[parameter.get('datastructure')]()
        dsfields = {
            'root': parameter.get('rootpath'),
            'dpath': parameter.get('datapath'),
            'dbase': parameter.get('database')
        }

        exf = ['root', 'dpath', 'dbase']

        if parameter['eventID'] != '*' and fnames == 'None':
            dsfields['eventID'] = parameter['eventID']
            exf.append('eventID')

        datastructure.modifyFields(**dsfields)
        datastructure.setExpandFields(exf)

        # check if default location routine NLLoc is available
        if real_None(parameter['nllocbin']) and locflag:
            # get NLLoc-root path
            nllocroot = parameter.get('nllocroot')
            # get path to NLLoc executable
            nllocbin = parameter.get('nllocbin')
            nlloccall = '%s/NLLoc' % nllocbin
            # get name of phase file
            phasef = parameter.get('phasefile')
            phasefile = '%s/obs/%s' % (nllocroot, phasef)
            # get name of NLLoc-control file
            ctrf = parameter.get('ctrfile')
            ctrfile = '%s/run/%s' % (nllocroot, ctrf)
            # pattern of NLLoc ttimes from location grid
            ttpat = parameter.get('ttpatter')
            # pattern of NLLoc-output file
            nllocoutpatter = parameter.get('outpatter')
            maxnumit = 3  # maximum number of iterations for re-picking
        else:
            locflag = 0
            print("                 !!!              ")
            print(
                "!!No location routine available, autoPyLoT is running in non-location mode!!"
            )
            print("!!No source parameter estimation possible!!")
            print("                 !!!              ")

        if not input_dict:
            # started in production mode
            datapath = datastructure.expandDataPath()
            if fnames == 'None' and parameter['eventID'] == '*':
                # multiple event processing
                # read each event in database
                events = [
                    events for events in glob.glob(os.path.join(datapath, '*'))
                    if os.path.isdir(events)
                ]
            elif (fnames == 'None' and parameter['eventID'] != '*'
                  and not isinstance(parameter['eventID'], list)):
                # single event processing
                events = glob.glob(os.path.join(datapath,
                                                parameter['eventID']))
            elif fnames == 'None' and isinstance(parameter['eventID'], list):
                # multiple event processing
                events = []
                for eventID in parameter['eventID']:
                    events.append(os.path.join(datapath, eventID))
            else:
                # autoPyLoT was initialized from GUI
                events = []
                events.append(eventid)
                evID = os.path.split(eventid)[-1]
                locflag = 2
        else:
            # started in tune or interactive mode
            datapath = os.path.join(parameter['rootpath'],
                                    parameter['datapath'])
            events = []
            for eventID in eventid:
                events.append(
                    os.path.join(datapath, parameter['database'], eventID))

        if not events:
            print('autoPyLoT: No events given. Return!')
            return

        # transform system path separator to '/'
        for index, eventpath in enumerate(events):
            eventpath = eventpath.replace(SEPARATOR, '/')
            events[index] = eventpath

        allpicks = {}
        glocflag = locflag
        for eventpath in events:
            evID = os.path.split(eventpath)[-1]
            fext = '.xml'
            filename = os.path.join(eventpath, 'PyLoT_' + evID + fext)
            try:
                data = Data(evtdata=filename)
                data.get_evt_data().path = eventpath
                print(
                    'Reading event data from filename {}...'.format(filename))
            except Exception as e:
                print('Could not read event from file {}: {}'.format(
                    filename, e))
                data = Data()
                pylot_event = Event(
                    eventpath)  # event should be path to event directory
                data.setEvtData(pylot_event)
            if fnames == 'None':
                data.setWFData(
                    glob.glob(os.path.join(datapath, eventpath, '*')))
                # the following is necessary because within
                # multiple event processing no event ID is provided
                # in autopylot.in
                try:
                    parameter.get('eventID')
                except Exception:
                    now = datetime.datetime.now()
                    eventID = '%d%02d%02d%02d%02d' % (
                        now.year, now.month, now.day, now.hour, now.minute)
                    parameter.setParam(eventID=eventID)
            else:
                data.setWFData(fnames)

                eventpath = events[0]
                # now = datetime.datetime.now()
                # evID = '%d%02d%02d%02d%02d' % (now.year,
                #                               now.month,
                #                               now.day,
                #                               now.hour,
                #                               now.minute)
                parameter.setParam(eventID=eventid)
            wfdat = data.getWFData()  # all available streams
            if not station == 'all':
                wfdat = wfdat.select(station=station)
                if not wfdat:
                    print('Could not find station {}. STOP!'.format(station))
                    return
            wfdat = remove_underscores(wfdat)
            # trim components for each station to avoid problems with different trace starttimes for one station
            wfdat = check4gaps(wfdat)
            wfdat = check4doubled(wfdat)
            wfdat = trim_station_components(wfdat,
                                            trim_start=True,
                                            trim_end=False)
            metadata = read_metadata(parameter.get('invdir'))
            # rotate stations to ZNE
            wfdat = check4rotated(wfdat, metadata)
            corr_dat = None
            if locflag:
                print("Restitute data ...")
                corr_dat = restitute_data(wfdat.copy(),
                                          *metadata,
                                          ncores=ncores)
            if not corr_dat and locflag:
                locflag = 2
            print('Working on event %s. Stations: %s' % (eventpath, station))
            print(wfdat)
            ##########################################################
            # !automated picking starts here!
            fdwj = None
            if fig_dict_wadatijack:
                fdwj = fig_dict_wadatijack[evID]
            picks = autopickevent(wfdat,
                                  parameter,
                                  iplot=iplot,
                                  fig_dict=fig_dict,
                                  fig_dict_wadatijack=fdwj,
                                  ncores=ncores,
                                  metadata=metadata,
                                  origin=data.get_evt_data().origins)
            ##########################################################
            # locating
            if locflag > 0:
                # write phases to NLLoc-phase file
                nll.export(picks, phasefile, parameter)

                # For locating the event the NLLoc-control file has to be modified!
                nllocout = '%s_%s' % (evID, nllocoutpatter)
                # create comment line for NLLoc-control file
                nll.modify_inputs(ctrf, nllocroot, nllocout, phasef, ttpat)

                # locate the event
                nll.locate(ctrfile, inputfile)

                # !iterative picking if traces remained unpicked or occupied with bad picks!
                # get theoretical onset times for picks with weights >= 4
                # in order to reprocess them using smaller time windows around theoretical onset
                # get stations with bad onsets
                badpicks = []
                for key in picks:
                    if picks[key]['P']['weight'] >= 4 or picks[key]['S'][
                            'weight'] >= 4:
                        badpicks.append([key, picks[key]['P']['mpp']])

                # TODO keep code DRY (Don't Repeat Yourself) the following part is written twice
                # suggestion: delete block and modify the later similar block to work properly

                if len(badpicks) == 0:
                    print(
                        "autoPyLoT: No bad onsets found, thus no iterative picking necessary!"
                    )
                    # get NLLoc-location file
                    locsearch = '%s/loc/%s.????????.??????.grid?.loc.hyp' % (
                        nllocroot, nllocout)
                    if len(glob.glob(locsearch)) > 0:
                        # get latest NLLoc-location file if several are available
                        nllocfile = max(glob.glob(locsearch),
                                        key=os.path.getctime)
                        evt = read_events(nllocfile)[0]
                        # calculate seismic moment Mo and moment magnitude Mw
                        moment_mag = MomentMagnitude(corr_dat, evt,
                                                     parameter.get('vp'),
                                                     parameter.get('Qp'),
                                                     parameter.get('rho'),
                                                     True, iplot)
                        # update pick with moment property values (w0, fc, Mo)
                        for stats, props in moment_mag.moment_props.items():
                            picks[stats]['P'].update(props)
                        evt = moment_mag.updated_event()
                        net_mw = moment_mag.net_magnitude()
                        print("Network moment magnitude: %4.1f" % net_mw.mag)
                        # calculate local (Richter) magnitude
                        WAscaling = parameter.get('WAscaling')
                        magscaling = parameter.get('magscaling')
                        local_mag = LocalMagnitude(corr_dat, evt,
                                                   parameter.get('sstop'),
                                                   WAscaling, True, iplot)
                        for stats, amplitude in local_mag.amplitudes.items():
                            picks[stats]['S'][
                                'Ao'] = amplitude.generic_amplitude
                        print("Local station magnitudes scaled with:")
                        print("log(Ao) + %f * log(r) + %f * r + %f" %
                              (WAscaling[0], WAscaling[1], WAscaling[2]))
                        evt = local_mag.updated_event(magscaling)
                        net_ml = local_mag.net_magnitude(magscaling)
                        print("Network local magnitude: %4.1f" % net_ml.mag)
                        print("Network local magnitude scaled with:")
                        print("%f * Ml + %f" % (magscaling[0], magscaling[1]))
                    else:
                        print("autoPyLoT: No NLLoc-location file available!")
                        print("No source parameter estimation possible!")
                        locflag = 9
                else:
                    # get theoretical P-onset times from NLLoc-location file
                    locsearch = '%s/loc/%s.????????.??????.grid?.loc.hyp' % (
                        nllocroot, nllocout)
                    if len(glob.glob(locsearch)) > 0:
                        # get latest file if several are available
                        nllocfile = max(glob.glob(locsearch),
                                        key=os.path.getctime)
                        nlloccounter = 0
                        while len(badpicks) > 0 and nlloccounter <= maxnumit:
                            nlloccounter += 1
                            if nlloccounter > maxnumit:
                                print(
                                    "autoPyLoT: Number of maximum iterations reached, stop iterative picking!"
                                )
                                break
                            print(
                                "autoPyLoT: Starting with iteration No. %d ..."
                                % nlloccounter)
                            if input_dict:
                                if 'fig_dict' in input_dict:
                                    fig_dict = input_dict['fig_dict']
                                    picks = iteratepicker(wfdat,
                                                          nllocfile,
                                                          picks,
                                                          badpicks,
                                                          parameter,
                                                          fig_dict=fig_dict)
                            else:
                                picks = iteratepicker(wfdat, nllocfile, picks,
                                                      badpicks, parameter)
                            # write phases to NLLoc-phase file
                            nll.export(picks, phasefile, parameter)
                            # remove actual NLLoc-location file to keep only the last
                            os.remove(nllocfile)
                            # locate the event
                            nll.locate(ctrfile, inputfile)
                            print("autoPyLoT: Iteration No. %d finished." %
                                  nlloccounter)
                            # get updated NLLoc-location file
                            nllocfile = max(glob.glob(locsearch),
                                            key=os.path.getctime)
                            # check for bad picks
                            badpicks = []
                            for key in picks:
                                if (picks[key]['P']['weight'] >= 4
                                        or picks[key]['S']['weight'] >= 4):
                                    badpicks.append(
                                        [key, picks[key]['P']['mpp']])
                            print(
                                "autoPyLoT: After iteration No. %d: %d bad onsets found ..."
                                % (nlloccounter, len(badpicks)))
                            if len(badpicks) == 0:
                                print(
                                    "autoPyLoT: No more bad onsets found, stop iterative picking!"
                                )
                                nlloccounter = maxnumit
                        evt = read_events(nllocfile)[0]
                        if locflag < 2:
                            # calculate seismic moment Mo and moment magnitude Mw
                            moment_mag = MomentMagnitude(
                                corr_dat, evt, parameter.get('vp'),
                                parameter.get('Qp'), parameter.get('rho'),
                                True, iplot)
                            # update pick with moment property values (w0, fc, Mo)
                            for stats, props in moment_mag.moment_props.items():
                                if stats in picks:
                                    picks[stats]['P'].update(props)
                            evt = moment_mag.updated_event()
                            net_mw = moment_mag.net_magnitude()
                            print("Network moment magnitude: %4.1f" %
                                  net_mw.mag)
                            # calculate local (Richter) magnitude
                            WAscaling = parameter.get('WAscaling')
                            magscaling = parameter.get('magscaling')
                            local_mag = LocalMagnitude(corr_dat, evt,
                                                       parameter.get('sstop'),
                                                       WAscaling, True, iplot)
                            for stats, amplitude in local_mag.amplitudes.items():
                                if stats in picks:
                                    picks[stats]['S']['Ao'] = \
                                        amplitude.generic_amplitude
                            print("Local station magnitudes scaled with:")
                            print("log(Ao) + %f * log(r) + %f * r + %f" %
                                  (WAscaling[0], WAscaling[1], WAscaling[2]))
                            evt = local_mag.updated_event(magscaling)
                            net_ml = local_mag.net_magnitude(magscaling)
                            print("Network local magnitude: %4.1f" %
                                  net_ml.mag)
                            print("Network local magnitude scaled with:")
                            print("%f * Ml + %f" %
                                  (magscaling[0], magscaling[1]))
                    else:
                        print(
                            "autoPyLoT: No NLLoc-location file available! Stop iteration!"
                        )
                        locflag = 9
            ##########################################################
            # write phase files for various location
            # and fault mechanism calculation routines
            # ObsPy event object
            if evt is not None:
                event_id = eventpath.split('/')[-1]
                evt.resource_id = ResourceIdentifier('smi:local/' + event_id)
                data.applyEVTData(evt, 'event')
            data.applyEVTData(picks)
            if savexml:
                if savepath in (None, 'None'):
                    saveEvtPath = eventpath
                else:
                    saveEvtPath = savepath
                fnqml = '%s/PyLoT_%s' % (saveEvtPath, evID)
                data.exportEvent(fnqml,
                                 fnext='.xml',
                                 fcheck=['auto', 'magnitude', 'origin'])
            if locflag == 1:
                # HYPO71
                hypo71file = '%s/PyLoT_%s_HYPO71_phases' % (eventpath, evID)
                hypo71.export(picks, hypo71file, parameter)
                # HYPOSAT
                hyposatfile = '%s/PyLoT_%s_HYPOSAT_phases' % (eventpath, evID)
                hyposat.export(picks, hyposatfile, parameter)
                # VELEST
                velestfile = '%s/PyLoT_%s_VELEST_phases.cnv' % (eventpath,
                                                                evID)
                velest.export(picks, velestfile, evt, parameter)
                # hypoDD
                hypoddfile = '%s/PyLoT_%s_hypoDD_phases.pha' % (eventpath,
                                                                evID)
                hypodd.export(picks, hypoddfile, parameter, evt)
                # FOCMEC
                focmecfile = '%s/PyLoT_%s_FOCMEC.in' % (eventpath, evID)
                focmec.export(picks, focmecfile, parameter, evt)
                # HASH
                hashfile = '%s/PyLoT_%s_HASH' % (eventpath, evID)
                hash.export(picks, hashfile, parameter, evt)

            endsplash = ('------------------------------------------\n'
                         '-----Finished event %s!-----\n'
                         '------------------------------------------' % evID)
            print(endsplash)
            locflag = glocflag
            if locflag == 0:
                print("autoPyLoT was running in non-location mode!")

            # save picks for current event ID to dictionary with ALL picks
            allpicks[evID] = picks

    endsp = ('####################################\n'
             '************************************\n'
             '*********autoPyLoT terminates*******\n'
             'The Python picking and Location Tool\n'
             '************************************')
    print(endsp)
    return allpicks
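
# Hedged refactor sketch for the DRY TODO above (not part of the original
# module): both magnitude-estimation blocks in autoPyLoT could collapse into
# one helper along these lines. MomentMagnitude, LocalMagnitude and the
# `parameter`, `corr_dat`, `iplot` objects are assumed to be the ones used
# above.
def estimate_magnitudes(evt, picks, corr_dat, parameter, iplot):
    """Fold moment and local magnitude results into evt and picks."""
    moment_mag = MomentMagnitude(corr_dat, evt, parameter.get('vp'),
                                 parameter.get('Qp'), parameter.get('rho'),
                                 True, iplot)
    for stats, props in moment_mag.moment_props.items():
        if stats in picks:
            picks[stats]['P'].update(props)
    evt = moment_mag.updated_event()
    local_mag = LocalMagnitude(corr_dat, evt, parameter.get('sstop'),
                               parameter.get('WAscaling'), True, iplot)
    for stats, amplitude in local_mag.amplitudes.items():
        if stats in picks:
            picks[stats]['S']['Ao'] = amplitude.generic_amplitude
    evt = local_mag.updated_event(parameter.get('magscaling'))
    return evt, picks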
Example #28
import sys
sys.path.insert(0, '/home/chet/EQcorrscan')
from glob import glob
from obspy import read, read_events
from obspy.core.event import ResourceIdentifier
from eqcorrscan.utils import plotting

temp_dir = '/media/chet/hdd/seismic/NZ/templates/rotnga_2015/refined_picks/*'
temp_files = glob(temp_dir)

# Template dictionary keyed to event resource_id
template_dict = {}
for filename in temp_files:
    # use str.replace, as rstrip('.mseed') strips characters, not a suffix
    uri_name = 'smi:org.gfz-potsdam.de/geofon/' +\
               filename.split('/')[-1].split('_')[-1].replace('.mseed', '')
    uri = ResourceIdentifier(uri_name)
    template_dict[uri] = read(filename)

# Raw template dictionary keyed to event resource_id
raw_dir = '/media/chet/hdd/seismic/NZ/templates/rotnga_2015/events_raw/*'
raw_files = glob(raw_dir)

raw_dict = {}
for filename in raw_files:
    uri_name = 'smi:org.gfz-potsdam.de/geofon/' +\
               filename.split('/')[-1].split('_')[-1].replace('.mseed', '')
    uri = ResourceIdentifier(uri_name)
    raw_dict[uri] = read(filename)

# Grab some catalog of interest
cat_list = glob('/media/chet/hdd/seismic/NZ/catalogs/qml/corr_groups/*029*')
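
# Hedged usage sketch (not in the original script): both dictionaries are
# keyed by ResourceIdentifier, so a template stream can be looked up per
# event read from the catalogs gathered above.
for qml_file in cat_list:
    for event in read_events(qml_file):
        template = template_dict.get(event.resource_id)
        if template is not None:
            print('Found template for %s' % event.resource_id.id)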
Example #29
 def test_origin(self):
     """
     Tests Origin object.
     """
     self.assertEqual(len(self.catalog[0].origins), 4)
     origin = self.catalog[0].origins[0]
     self.assertEqual(
         origin.resource_id,
         ResourceIdentifier(
             id='quakeml:us.anss.org/origin/20120101052755.98'))
     self.assertEqual(origin.type, 'hypocenter')
     self.assertEqual(origin.time, UTCDateTime(2012, 1, 1, 5, 27, 55,
                                               980000))
     self.assertEqual(origin.latitude, 31.456)
     self.assertAlmostEqual(origin.latitude_errors.uncertainty,
                            0.0155,
                            places=3)
     self.assertEqual(origin.longitude, 138.072)
     self.assertAlmostEqual(origin.longitude_errors.uncertainty,
                            0.0173,
                            places=3)
     self.assertEqual(origin.depth, 365300.0)
     self.assertEqual(origin.depth_errors.uncertainty, 2700.0)
     self.assertEqual(origin.depth_type, 'from location')
     self.assertEqual(origin.method_id, None)
     self.assertEqual(origin.time_fixed, None)
     self.assertEqual(origin.epicenter_fixed, None)
     self.assertEqual(
         origin.earth_model_id,
         ResourceIdentifier(id='quakeml:us.anss.org/earthmodel/ak135'))
     self.assertEqual(origin.evaluation_mode, None)
     self.assertEqual(origin.evaluation_status, None)
     self.assertEqual(origin.origin_type, None)
     # composite times
     self.assertEqual(len(origin.composite_times), 0)
     # quality
     self.assertEqual(origin.quality.used_station_count, 628)
     self.assertEqual(origin.quality.standard_error, 0.84)
     self.assertEqual(origin.quality.azimuthal_gap, 10.8)
     self.assertEqual(origin.quality.maximum_distance, 29.1)
     self.assertEqual(origin.quality.minimum_distance, 2.22)
     self.assertEqual(origin.quality.associated_phase_count, 52)
     self.assertEqual(origin.quality.associated_station_count, 628)
     self.assertEqual(origin.quality.depth_phase_count, 0)
     self.assertEqual(origin.quality.secondary_azimuthal_gap, None)
     self.assertEqual(origin.quality.ground_truth_level, None)
     self.assertEqual(origin.quality.median_distance, None)
     # comments
     self.assertEqual(len(origin.comments), 0)
     # creation info
     self.assertEqual(origin.creation_info.author, None)
     self.assertEqual(origin.creation_info.agency_id, 'USGS-NEIC')
     self.assertEqual(origin.creation_info.author_uri, None)
     self.assertEqual(origin.creation_info.agency_uri, None)
     self.assertEqual(origin.creation_info.creation_time, None)
     self.assertEqual(origin.creation_info.version, None)
     # origin uncertainty
     u = origin.origin_uncertainty
     self.assertEqual(u.preferred_description, 'confidence ellipsoid')
     self.assertEqual(u.horizontal_uncertainty, None)
     self.assertEqual(u.min_horizontal_uncertainty, None)
     self.assertEqual(u.max_horizontal_uncertainty, None)
     self.assertEqual(u.azimuth_max_horizontal_uncertainty, None)
     # confidence ellipsoid
     c = u.confidence_ellipsoid
     self.assertEqual(c.semi_intermediate_axis_length, 2750.0)
     # c.major_axis_rotation is computed during file reading:
     self.assertAlmostEqual(c.major_axis_rotation, 170.5, places=3)
     self.assertEqual(c.major_axis_plunge, 76.06)
     self.assertEqual(c.semi_minor_axis_length, 2210.0)
     self.assertEqual(c.semi_major_axis_length, 4220.0)
     self.assertEqual(c.major_axis_azimuth, 292.79)
Example #30
def xcorr_pick_family(family,
                      stream,
                      shift_len=0.2,
                      min_cc=0.4,
                      horizontal_chans=['E', 'N', '1', '2'],
                      vertical_chans=['Z'],
                      cores=1,
                      interpolate=False,
                      plot=False,
                      plotdir=None):
    """
    Compute cross-correlation picks for detections in a family.

    :type family: `eqcorrscan.core.match_filter.family.Family`
    :param family: Family to calculate correlation picks for.
    :type stream: `obspy.core.stream.Stream`
    :param stream:
        Data stream containing data for all (or a subset of) detections in
        the Family
    :type shift_len: float
    :param shift_len:
        Shift length allowed for the pick in seconds, will be plus/minus this
        amount - default=0.2
    :type min_cc: float
    :param min_cc:
        Minimum cross-correlation value to be considered a pick, default=0.4.
    :type horizontal_chans: list
    :param horizontal_chans:
        List of channel endings for horizontal-channels, on which S-picks will
        be made.
    :type vertical_chans: list
    :param vertical_chans:
        List of channel endings for vertical-channels, on which P-picks will
        be made.
    :type cores: int
    :param cores:
        Number of cores to use in parallel processing, defaults to one.
    :type interpolate: bool
    :param interpolate:
        Interpolate the correlation function to achieve sub-sample precision.
    :type plot: bool
    :param plot:
        To generate a plot for every detection or not, defaults to False
    :type plotdir: str
    :param plotdir:
        Path to plotting folder, plots will be output here.

    :return: Catalog of events.
    """
    picked_dict = {}
    delta = family.template.st[0].stats.delta
    detect_streams_dict = _prepare_data(family=family,
                                        detect_data=stream,
                                        shift_len=shift_len)
    detection_ids = list(detect_streams_dict.keys())
    detect_streams = [
        detect_streams_dict[detection_id] for detection_id in detection_ids
    ]
    if len(detect_streams) == 0:
        Logger.warning("No appropriate data found, check your family and "
                       "detections - make sure seed ids match")
        return picked_dict
    if len(detect_streams) != len(family):
        Logger.warning("Not all detections have matching data. "
                       "Proceeding anyway. HINT: Make sure SEED IDs match")
    # Correlation function needs a list of streams, we need to maintain order.
    ccc, chans = _concatenate_and_correlate(streams=detect_streams,
                                            template=family.template.st,
                                            cores=cores)
    for i, detection_id in enumerate(detection_ids):
        detection = [d for d in family.detections if d.id == detection_id][0]
        correlations = ccc[i]
        picked_chans = chans[i]
        detect_stream = detect_streams_dict[detection_id]
        checksum, cccsum, used_chans = 0.0, 0.0, 0
        event = Event()
        for correlation, stachan in zip(correlations, picked_chans):
            if not stachan.used:
                continue
            tr = detect_stream.select(station=stachan.channel[0],
                                      channel=stachan.channel[1])[0]
            if interpolate:
                shift, cc_max = _xcorr_interp(correlation, dt=delta)
            else:
                cc_max = np.amax(correlation)
                shift = np.argmax(correlation) * delta
            if np.isnan(cc_max):  # pragma: no cover
                Logger.error(
                    'Problematic trace, no cross correlation possible')
                continue
            picktime = tr.stats.starttime + shift
            checksum += cc_max
            used_chans += 1
            if cc_max < min_cc:
                Logger.debug('Correlation of {0} is below threshold, not '
                             'using'.format(cc_max))
                continue
            cccsum += cc_max
            phase = None
            if stachan.channel[1][-1] in vertical_chans:
                phase = 'P'
            elif stachan.channel[1][-1] in horizontal_chans:
                phase = 'S'
            _waveform_id = WaveformStreamID(seed_string=tr.id)
            event.picks.append(
                Pick(waveform_id=_waveform_id,
                     time=picktime,
                     method_id=ResourceIdentifier('EQcorrscan'),
                     phase_hint=phase,
                     creation_info='eqcorrscan.core.lag_calc',
                     evaluation_mode='automatic',
                     comments=[Comment(text='cc_max={0}'.format(cc_max))]))
        event.resource_id = ResourceIdentifier(detection_id)
        event.comments.append(Comment(text="detect_val={0}".format(cccsum)))
        # Add template-name as comment to events
        event.comments.append(
            Comment(text="Detected using template: {0}".format(
                family.template.name)))
        if used_chans == detection.no_chans:  # pragma: no cover
            if detection.detect_val is not None and\
               checksum - detection.detect_val < -(0.3 * detection.detect_val):
                msg = ('lag-calc has decreased cccsum from %f to %f - ' %
                       (detection.detect_val, checksum))
                Logger.error(msg)
                continue
        else:
            Logger.warning(
                'Cannot check if cccsum is better, used {0} channels for '
                'detection, but {1} are used here'.format(
                    detection.no_chans, used_chans))
        picked_dict.update({detection_id: event})
    if plot:  # pragma: no cover
        for i, event in enumerate(picked_dict.values()):
            if len(event.picks) == 0:
                continue
            plot_stream = detect_streams[i].copy()
            template_plot = family.template.st.copy()
            pick_stachans = [(pick.waveform_id.station_code,
                              pick.waveform_id.channel_code)
                             for pick in event.picks]
            for tr in plot_stream:
                if (tr.stats.station, tr.stats.channel) \
                        not in pick_stachans:
                    plot_stream.remove(tr)
            for tr in template_plot:
                if (tr.stats.station, tr.stats.channel) \
                        not in pick_stachans:
                    template_plot.remove(tr)
            if plotdir is not None:
                if not os.path.isdir(plotdir):
                    os.makedirs(plotdir)
                savefile = "{plotdir}/{rid}.png".format(
                    plotdir=plotdir, rid=event.resource_id.id)
                plot_repicked(template=template_plot,
                              picks=event.picks,
                              det_stream=plot_stream,
                              show=False,
                              save=True,
                              savefile=savefile)
            else:
                plot_repicked(template=template_plot,
                              picks=event.picks,
                              det_stream=plot_stream,
                              show=True)
    return picked_dict
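
# Hedged usage sketch (illustrative, not from the source): apply
# xcorr_pick_family across a whole eqcorrscan Party; `party` and `stream`
# are assumed to be a Party of families and a pre-loaded obspy Stream.
def pick_party(party, stream):
    picked = {}
    for family in party:
        picked.update(xcorr_pick_family(family=family, stream=stream,
                                        shift_len=0.2, min_cc=0.4,
                                        interpolate=True, cores=4))
    return picked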
Example #31
 def test_focalmechanism(self):
     """
     Tests FocalMechanism object.
     """
     filename = os.path.join(self.path, 'quakeml_1.2_focalmechanism.xml')
     catalog = readQuakeML(filename)
     self.assertEqual(len(catalog), 1)
     self.assertEqual(len(catalog[0].focal_mechanisms), 2)
     fm = catalog[0].focal_mechanisms[0]
     # general
     self.assertEqual(fm.resource_id,
                      ResourceIdentifier('smi:ISC/fmid=292309'))
     self.assertEqual(fm.waveform_id.network_code, 'BW')
     self.assertEqual(fm.waveform_id.station_code, 'FUR')
     self.assertEqual(
         fm.waveform_id.resource_uri,
         ResourceIdentifier(resource_id="smi:ch.ethz.sed/waveform/201754"))
     self.assertTrue(isinstance(fm.waveform_id, WaveformStreamID))
     self.assertEqual(fm.triggering_origin_id,
                      ResourceIdentifier('smi:local/originId=7680412'))
     self.assertAlmostEqual(fm.azimuthal_gap, 0.123)
     self.assertEqual(fm.station_polarity_count, 987)
     self.assertAlmostEqual(fm.misfit, 1.234)
     self.assertAlmostEqual(fm.station_distribution_ratio, 2.345)
     self.assertEqual(
         fm.method_id,
         ResourceIdentifier('smi:ISC/methodID=Best_double_couple'))
     # comments
     self.assertEqual(len(fm.comments), 2)
     c = fm.comments
     self.assertEqual(c[0].text, 'Relocated after re-evaluation')
     self.assertEqual(c[0].resource_id, None)
     self.assertEqual(c[0].creation_info.agency_id, 'MUH')
     self.assertEqual(c[1].text, 'Another MUH')
     self.assertEqual(
         c[1].resource_id,
         ResourceIdentifier(resource_id="smi:some/comment/id/number_3"))
     self.assertEqual(c[1].creation_info, None)
     # creation info
     self.assertEqual(fm.creation_info.author, "Erika Mustermann")
     self.assertEqual(fm.creation_info.agency_id, "MUH")
     self.assertEqual(
         fm.creation_info.author_uri,
         ResourceIdentifier("smi:smi-registry/organization/MUH"))
     self.assertEqual(
         fm.creation_info.agency_uri,
         ResourceIdentifier("smi:smi-registry/organization/MUH"))
     self.assertEqual(fm.creation_info.creation_time,
                      UTCDateTime("2012-04-04T16:40:50+00:00"))
     self.assertEqual(fm.creation_info.version, "1.0.1")
     # nodalPlanes
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_1.strike, 346.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_1.dip, 57.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_1.rake, 75.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_2.strike, 193.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_2.dip, 36.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_2.rake, 112.0)
     self.assertEqual(fm.nodal_planes.preferred_plane, 2)
     # principalAxes
     self.assertAlmostEqual(fm.principal_axes.t_axis.azimuth, 216.0)
     self.assertAlmostEqual(fm.principal_axes.t_axis.plunge, 73.0)
     self.assertAlmostEqual(fm.principal_axes.t_axis.length, 1.050e+18)
     self.assertAlmostEqual(fm.principal_axes.p_axis.azimuth, 86.0)
     self.assertAlmostEqual(fm.principal_axes.p_axis.plunge, 10.0)
     self.assertAlmostEqual(fm.principal_axes.p_axis.length, -1.180e+18)
     self.assertEqual(fm.principal_axes.n_axis.azimuth, None)
     self.assertEqual(fm.principal_axes.n_axis.plunge, None)
     self.assertEqual(fm.principal_axes.n_axis.length, None)
     # momentTensor
     mt = fm.moment_tensor
     self.assertEqual(mt.resource_id,
                      ResourceIdentifier('smi:ISC/mtid=123321'))
     self.assertEqual(mt.derived_origin_id,
                      ResourceIdentifier('smi:ISC/origid=13145006'))
     self.assertAlmostEqual(mt.scalar_moment, 1.100e+18)
     self.assertAlmostEqual(mt.tensor.m_rr, 9.300e+17)
     self.assertAlmostEqual(mt.tensor.m_tt, 1.700e+17)
     self.assertAlmostEqual(mt.tensor.m_pp, -1.100e+18)
     self.assertAlmostEqual(mt.tensor.m_rt, -2.200e+17)
     self.assertAlmostEqual(mt.tensor.m_rp, 4.000e+17)
     self.assertAlmostEqual(mt.tensor.m_tp, 3.000e+16)
     self.assertAlmostEqual(mt.clvd, 0.22)
     # exporting back to XML should result in the same document
     original = open(filename, "rt").read()
     processed = Pickler().dumps(catalog)
     self._compareStrings(original, processed)
Example #32
def _read_evt(filename, inventory=None, id_map=None, id_default='.{}..{}',
              encoding='utf-8'):
    """
    Read a SeismicHandler EVT file and returns an ObsPy Catalog object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.event.read_events` function, call this
        instead.

    :type filename: str
    :param filename: File or file-like object in text mode.
    :type inventory: :class:`~obspy.core.inventory.inventory.Inventory`
    :param inventory: Inventory used to retrieve network code, location code
        and channel code of stations (SEED id).
    :type id_map: dict
    :param id_map: If channel information was not found in inventory,
        it will be looked up in this dictionary
        (example: `id_map={'MOX': 'GR.{}..HH{}'}`).
        The values must contain three dots and two `{}` which are
        substituted by station code and component.
    :type id_default: str
    :param id_default: Default SEED id expression.
        The value must contain three dots and two `{}` which are
        substituted by station code and component.
    :param str encoding: encoding used (default: utf-8)

    :rtype: :class:`~obspy.core.event.Catalog`
    :return: An ObsPy Catalog object.

    .. note::
        The following fields are supported by this function: %s.

        Compare with http://www.seismic-handler.org/wiki/ShmDocFileEvt
    """
    seed_map = _seed_id_map(inventory, id_map, id_default)
    with io.open(filename, 'r', encoding=encoding) as f:
        temp = f.read()
    # first create phases and phases_o dictionaries for different phases
    # and phases with origin information
    phases = defaultdict(list)
    phases_o = {}
    phase = {}
    evid = None
    for line in temp.splitlines():
        if 'End of Phase' in line:
            if 'origin time' in phase:
                if evid in phases_o:
                    # found more than one origin
                    pass
                phases_o[evid] = phase
            phases[evid].append(phase)
            phase = {}
            evid = None
        elif line.strip() != '':
            try:
                key, value = line.split(':', 1)
            except ValueError:
                continue
            key = key.strip().lower()
            value = value.strip()
            if key == 'event id':
                evid = value
            elif value != '':
                phase[key] = value
    assert evid is None

    # now create obspy Events from phases and phases_o dictionaries
    events = []
    for evid in phases:
        picks = []
        arrivals = []
        stamags = []
        origins = []
        po = None
        magnitudes = []
        pm = None
        for p in phases[evid]:
            try:
                sta = p['station code']
            except KeyError:
                sta = ''
            try:
                comp = p['component']
            except KeyError:
                comp = ''
            try:
                wid = seed_map[sta]
            except KeyError:
                wid = id_default
            wid = WaveformStreamID(seed_string=wid.format(sta, comp))
            pick = Pick(waveform_id=wid, **_kw(p, 'pick'))
            arrival = Arrival(pick_id=pick.resource_id, **_kw(p, 'arrival'))
            picks.append(pick)
            arrivals.append(arrival)
            stamags_temp, _ = _mags(p, evid, stamag=True, wid=wid)
            stamags.extend(stamags_temp)
        if evid in phases_o:
            o = phases_o[evid]
            uncertainty = OriginUncertainty(**_kw(o, 'origin_uncertainty'))
            origin = Origin(arrivals=arrivals, origin_uncertainty=uncertainty,
                            **_kw(o, 'origin'))
            if origin.latitude is None or origin.longitude is None:
                warn('latitude or longitude not set for event %s' % evid)
            else:
                if origin.longitude_errors.uncertainty is not None:
                    origin.longitude_errors.uncertainty *= cos(
                        origin.latitude / 180 * pi)
                origins = [origin]
                po = origin.resource_id
            magnitudes, pm = _mags(o, evid)
        else:
            o = p
        event = Event(resource_id=ResourceIdentifier(evid),
                      picks=picks,
                      origins=origins,
                      magnitudes=magnitudes,
                      station_magnitudes=stamags,
                      preferred_origin_id=po,
                      preferred_magnitude_id=pm,
                      **_kw(o, 'event')
                      )
        events.append(event)
    return Catalog(events,
                   description='Created from SeismicHandler EVT format')
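
# Hedged usage sketch: per the warning in the docstring, _read_evt is not
# called directly; the reader is reached through obspy's read_events. The
# file name below is illustrative, and the exact format key may differ
# between ObsPy versions.
# from obspy import read_events
# cat = read_events('shm_bulletin.evt')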
Example #33
def full_test_event():
    """
    Function to generate a basic, full test event
    """
    test_event = Event()
    test_event.origins.append(
        Origin(time=UTCDateTime("2012-03-26") + 1.2,
               latitude=45.0,
               longitude=25.0,
               depth=15000))
    test_event.event_descriptions.append(EventDescription())
    test_event.event_descriptions[0].text = 'LE'
    test_event.creation_info = CreationInfo(agency_id='TES')
    test_event.magnitudes.append(
        Magnitude(mag=0.1,
                  magnitude_type='ML',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[0].resource_id))
    test_event.magnitudes.append(
        Magnitude(mag=0.5,
                  magnitude_type='Mc',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[0].resource_id))
    test_event.magnitudes.append(
        Magnitude(mag=1.3,
                  magnitude_type='Ms',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[0].resource_id))

    # Define the test pick
    _waveform_id_1 = WaveformStreamID(station_code='FOZ',
                                      channel_code='SHZ',
                                      network_code='NZ')
    _waveform_id_2 = WaveformStreamID(station_code='WTSZ',
                                      channel_code='BH1',
                                      network_code=' ')
    # Pick to associate with amplitude
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_1,
             phase_hint='IAML',
             polarity='undecidable',
             time=UTCDateTime("2012-03-26") + 1.68,
             evaluation_mode="manual"))
    # Need a second pick for coda
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_1,
             onset='impulsive',
             phase_hint='PN',
             polarity='positive',
             time=UTCDateTime("2012-03-26") + 1.68,
             evaluation_mode="manual"))
    # Unassociated pick
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_2,
             onset='impulsive',
             phase_hint='SG',
             polarity='undecidable',
             time=UTCDateTime("2012-03-26") + 1.72,
             evaluation_mode="manual"))
    # Unassociated pick
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_2,
             onset='impulsive',
             phase_hint='PN',
             polarity='undecidable',
             time=UTCDateTime("2012-03-26") + 1.62,
             evaluation_mode="automatic"))
    # Test a generic local magnitude amplitude pick
    test_event.amplitudes.append(
        Amplitude(generic_amplitude=2.0,
                  period=0.4,
                  pick_id=test_event.picks[0].resource_id,
                  waveform_id=test_event.picks[0].waveform_id,
                  unit='m',
                  magnitude_hint='ML',
                  category='point',
                  type='AML'))
    # Test a coda magnitude pick
    test_event.amplitudes.append(
        Amplitude(generic_amplitude=10,
                  pick_id=test_event.picks[1].resource_id,
                  waveform_id=test_event.picks[1].waveform_id,
                  type='END',
                  category='duration',
                  unit='s',
                  magnitude_hint='Mc',
                  snr=2.3))
    test_event.origins[0].arrivals.append(
        Arrival(time_weight=0,
                phase=test_event.picks[1].phase_hint,
                pick_id=test_event.picks[1].resource_id))
    test_event.origins[0].arrivals.append(
        Arrival(time_weight=2,
                phase=test_event.picks[2].phase_hint,
                pick_id=test_event.picks[2].resource_id,
                backazimuth_residual=5,
                time_residual=0.2,
                distance=15,
                azimuth=25))
    test_event.origins[0].arrivals.append(
        Arrival(time_weight=2,
                phase=test_event.picks[3].phase_hint,
                pick_id=test_event.picks[3].resource_id,
                backazimuth_residual=5,
                time_residual=0.2,
                distance=15,
                azimuth=25))
    # Add in error info (line E)
    test_event.origins[0].quality = OriginQuality(standard_error=0.01,
                                                  azimuthal_gap=36)
    # Origin uncertainty in Seisan is output as long-lat-depth, quakeML has
    # semi-major and semi-minor
    test_event.origins[0].origin_uncertainty = OriginUncertainty(
        confidence_ellipsoid=ConfidenceEllipsoid(
            semi_major_axis_length=3000,
            semi_minor_axis_length=1000,
            semi_intermediate_axis_length=2000,
            major_axis_plunge=20,
            major_axis_azimuth=100,
            major_axis_rotation=4))
    test_event.origins[0].time_errors = QuantityError(uncertainty=0.5)
    # Add in fault-plane solution info (line F) - note: need to check which
    # program was used to determine which fields are filled...
    test_event.focal_mechanisms.append(
        FocalMechanism(nodal_planes=NodalPlanes(
            nodal_plane_1=NodalPlane(strike=180,
                                     dip=20,
                                     rake=30,
                                     strike_errors=QuantityError(10),
                                     dip_errors=QuantityError(10),
                                     rake_errors=QuantityError(20))),
                       method_id=ResourceIdentifier(
                           "smi:nc.anss.org/focalMechanism/FPFIT"),
                       creation_info=CreationInfo(agency_id="NC"),
                       misfit=0.5,
                       station_distribution_ratio=0.8))
    # Need to test high-precision origin and that it is preferred origin.
    # Moment tensor includes another origin
    test_event.origins.append(
        Origin(time=UTCDateTime("2012-03-26") + 1.2,
               latitude=45.1,
               longitude=25.2,
               depth=14500))
    test_event.magnitudes.append(
        Magnitude(mag=0.1,
                  magnitude_type='MW',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[-1].resource_id))
    # Moment tensors go with focal-mechanisms
    test_event.focal_mechanisms.append(
        FocalMechanism(moment_tensor=MomentTensor(
            derived_origin_id=test_event.origins[-1].resource_id,
            moment_magnitude_id=test_event.magnitudes[-1].resource_id,
            scalar_moment=100,
            tensor=Tensor(
                m_rr=100, m_tt=100, m_pp=10, m_rt=1, m_rp=20, m_tp=15),
            method_id=ResourceIdentifier(
                'smi:nc.anss.org/momentTensor/BLAH'))))
    return test_event
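
# Hedged usage sketch (not part of the original helper): round-trip the
# synthetic event through QuakeML; the output path is illustrative.
def roundtrip_test_event(path='/tmp/full_test_event.xml'):
    from obspy import read_events
    from obspy.core.event import Catalog
    cat = Catalog(events=[full_test_event()])
    cat.write(path, format='QUAKEML')
    return read_events(path)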
Example #34
def _channel_loop(detection,
                  template,
                  min_cc,
                  detection_id,
                  interpolate,
                  i,
                  pre_lag_ccsum=None,
                  detect_chans=0,
                  horizontal_chans=['E', 'N', '1', '2'],
                  vertical_chans=['Z'],
                  debug=0):
    """
    Inner loop for correlating and assigning picks.

    Utility function to take a stream of data for the detected event and write
    maximum correlation to absolute time as picks in an obspy.core.event.Event
    object.
    Only outputs picks for picks above min_cc.

    :type detection: obspy.core.stream.Stream
    :param detection:
        Stream of data for the slave event detected using template.
    :type template: obspy.core.stream.Stream
    :param template: Stream of data as the template for the detection.
    :type min_cc: float
    :param min_cc: Minimum cross-correlation value to allow a pick to be made.
    :type detection_id: str
    :param detection_id: Detection ID to associate the event with.
    :type interpolate: bool
    :param interpolate:
        Interpolate the correlation function to achieve sub-sample precision.
    :type i: int
    :param i:
        Used to track which process has occurred when running in parallel.
    :type pre_lag_ccsum: float
    :param pre_lag_ccsum:
        Cross-correlation sum before lag-calc, will check that the
        cross-correlation sum is increased by lag-calc (using all channels,
        ignoring min_cc)
    :type detect_chans: int
    :param detect_chans:
        Number of channels originally used in detections, must match the number
        used here to allow for cccsum checking.
    :type horizontal_chans: list
    :param horizontal_chans:
        List of channel endings for horizontal-channels, on which S-picks will
        be made.
    :type vertical_chans: list
    :param vertical_chans:
        List of channel endings for vertical-channels, on which P-picks will
        be made.
    :type debug: int
    :param debug: Debug output level 0-5.

    :returns:
        Event object containing network, station, channel and pick information.
    :rtype: :class:`obspy.core.event.Event`
    """
    from eqcorrscan.core.match_filter import normxcorr2
    event = Event()
    s_stachans = {}
    cccsum = 0
    checksum = 0
    used_chans = 0
    for tr in template:
        temp_net = tr.stats.network
        temp_sta = tr.stats.station
        temp_chan = tr.stats.channel
        debug_print('Working on: %s.%s.%s' % (temp_net, temp_sta, temp_chan),
                    3, debug)
        image = detection.select(station=temp_sta, channel=temp_chan)
        if len(image) == 0:
            print('No match in image.')
            continue
        if interpolate:
            try:
                ccc = normxcorr2(tr.data, image[0].data)
            except Exception:
                print('Could not calculate cc')
                print('Image is %i long' % len(image[0].data))
                print('Template is %i long' % len(tr.data))
                continue
            try:
                shift, cc_max = _xcorr_interp(ccc=ccc, dt=image[0].stats.delta)
            except IndexError:
                print('Could not interpolate ccc, not smooth')
                ccc = normxcorr2(tr.data, image[0].data)
                cc_max = np.amax(ccc)
                shift = np.argmax(ccc) * image[0].stats.delta
            # Convert the maximum cross-correlation time to an actual time
            picktime = image[0].stats.starttime + shift
        else:
            # Convert the maximum cross-correlation time to an actual time
            try:
                ccc = normxcorr2(tr.data, image[0].data)
            except Exception:
                print('Could not calculate cc')
                print('Image is %i long' % len(image[0].data))
                print('Template is %i long' % len(tr.data))
                continue
            cc_max = np.amax(ccc)
            picktime = image[0].stats.starttime + (np.argmax(ccc) *
                                                   image[0].stats.delta)
        debug_print('Maximum cross-corr=%s' % cc_max, 3, debug)
        checksum += cc_max
        used_chans += 1
        if cc_max < min_cc:
            debug_print('Correlation below threshold, not used', 3, debug)
            continue
        cccsum += cc_max
        # Perhaps weight each pick by the cc val or cc val^2?
        # weight = np.amax(ccc) ** 2
        if temp_chan[-1] in vertical_chans:
            phase = 'P'
        # Only take the S-pick with the best correlation
        elif temp_chan[-1] in horizontal_chans:
            phase = 'S'
            debug_print(
                'Making S-pick on: %s.%s.%s' % (temp_net, temp_sta, temp_chan),
                4, debug)
            if temp_sta not in s_stachans:
                s_stachans[temp_sta] = (temp_chan, np.amax(ccc), picktime)
            elif np.amax(ccc) <= s_stachans[temp_sta][1]:
                # an S-pick with higher correlation already exists, skip
                continue
        else:
            phase = None
        _waveform_id = WaveformStreamID(network_code=temp_net,
                                        station_code=temp_sta,
                                        channel_code=temp_chan)
        event.picks.append(
            Pick(waveform_id=_waveform_id,
                 time=picktime,
                 method_id=ResourceIdentifier('EQcorrscan'),
                 phase_hint=phase,
                 creation_info='eqcorrscan.core.lag_calc',
                 comments=[Comment(text='cc_max=%s' % cc_max)]))
        event.resource_id = detection_id
    ccc_str = ("detect_val=%s" % cccsum)
    event.comments.append(Comment(text=ccc_str))
    if used_chans == detect_chans:
        if pre_lag_ccsum is not None and\
           checksum - pre_lag_ccsum < -(0.30 * pre_lag_ccsum):
            msg = ('lag-calc has decreased cccsum from %f to %f - ' %
                   (pre_lag_ccsum, checksum))
            # warnings.warn(msg)
            raise LagCalcError(msg)
    else:
        warnings.warn('Cannot check if cccsum is better, used %i channels '
                      'for detection, but %i are used here' %
                      (detect_chans, used_chans))
    return i, event
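
# Hedged note (not from the source): _channel_loop is an internal worker that
# lag-calc maps over per-detection streams, roughly:
# i, event = _channel_loop(detection=detect_stream, template=template_stream,
#                          min_cc=0.4, detection_id=detection.id,
#                          interpolate=False, i=0)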
Example #35
 def test_adding_a_referred_object_after_creation(self):
     """
     Check that the referred objects can also be made available after the
     ResourceIdentifier instances have been created.
     """
     obj = UTCDateTime()
     res_id = "obspy.org/time/test"
     ref_a = ResourceIdentifier(res_id)
     ref_b = ResourceIdentifier(res_id)
     ref_c = ResourceIdentifier(res_id)
     # All three will have no resource attached.
     self.assertEqual(ref_a.get_referred_object(), None)
     self.assertEqual(ref_b.get_referred_object(), None)
     self.assertEqual(ref_c.get_referred_object(), None)
     # Setting the object for one will make it available to all other
     # instances, provided they weren't bound to specific objects.
     ref_b.set_referred_object(obj)
     self.assertIs(ref_a.get_referred_object(), obj)
     self.assertIs(ref_b.get_referred_object(), obj)
     self.assertIs(ref_c.get_referred_object(), obj)
Example #36
    def _parseRecordP(self, line, event):
        """
        Parses the 'primary phase record' P

        The primary phase is the first phase of the reading,
        regardless its type.
        """
        station = line[2:7].strip()
        phase = line[7:15]
        arrival_time = line[15:24]
        residual = self._float(line[25:30])
        # unused: residual_flag = line[30]
        distance = self._float(line[32:38])  # degrees
        azimuth = self._float(line[39:44])
        backazimuth = round(azimuth % -360 + 180, 1)
        mb_period = self._float(line[44:48])
        mb_amplitude = self._float(line[48:55])  # nanometers
        mb_magnitude = self._float(line[56:59])
        # unused: mb_usage_flag = line[59]

        origin = event.origins[0]
        evid = event.resource_id.id.split('/')[-1]
        waveform_id = WaveformStreamID()
        waveform_id.station_code = station
        # network_code is required for QuakeML validation
        waveform_id.network_code = '  '
        station_string = \
            waveform_id.getSEEDString()\
            .replace(' ', '-').replace('.', '_').lower()
        prefix = '/'.join(
            (res_id_prefix, 'waveformstream', evid, station_string))
        waveform_id.resource_uri = ResourceIdentifier(prefix=prefix)
        pick = Pick()
        prefix = '/'.join((res_id_prefix, 'pick', evid, station_string))
        pick.resource_id = ResourceIdentifier(prefix=prefix)
        date = origin.time.strftime('%Y%m%d')
        pick.time = UTCDateTime(date + arrival_time)
        # Check if pick is on the next day:
        if pick.time < origin.time:
            pick.time += timedelta(days=1)
        pick.waveform_id = waveform_id
        pick.backazimuth = backazimuth
        onset = phase[0]
        if onset == 'e':
            pick.onset = 'emergent'
            phase = phase[1:]
        elif onset == 'i':
            pick.onset = 'impulsive'
            phase = phase[1:]
        elif onset == 'q':
            pick.onset = 'questionable'
            phase = phase[1:]
        pick.phase_hint = phase.strip()
        event.picks.append(pick)
        if mb_amplitude is not None:
            amplitude = Amplitude()
            prefix = '/'.join((res_id_prefix, 'amp', evid, station_string))
            amplitude.resource_id = ResourceIdentifier(prefix=prefix)
            amplitude.generic_amplitude = mb_amplitude * 1E-9
            amplitude.unit = 'm'
            amplitude.period = mb_period
            amplitude.type = 'AB'
            amplitude.magnitude_hint = 'Mb'
            amplitude.pick_id = pick.resource_id
            amplitude.waveform_id = pick.waveform_id
            event.amplitudes.append(amplitude)
            station_magnitude = StationMagnitude()
            prefix = '/'.join(
                (res_id_prefix, 'stationmagnitude', evid, station_string))
            station_magnitude.resource_id = ResourceIdentifier(prefix=prefix)
            station_magnitude.origin_id = origin.resource_id
            station_magnitude.mag = mb_magnitude
            # station_magnitude.mag_errors['uncertainty'] = 0.0
            station_magnitude.station_magnitude_type = 'Mb'
            station_magnitude.amplitude_id = amplitude.resource_id
            station_magnitude.waveform_id = pick.waveform_id
            res_id = '/'.join(
                (res_id_prefix, 'magnitude/generic/body_wave_magnitude'))
            station_magnitude.method_id = \
                ResourceIdentifier(id=res_id)
            event.station_magnitudes.append(station_magnitude)
        arrival = Arrival()
        prefix = '/'.join((res_id_prefix, 'arrival', evid, station_string))
        arrival.resource_id = ResourceIdentifier(prefix=prefix)
        arrival.pick_id = pick.resource_id
        arrival.phase = pick.phase_hint
        arrival.azimuth = azimuth
        arrival.distance = distance
        arrival.time_residual = residual
        res_id = '/'.join((res_id_prefix, 'earthmodel/ak135'))
        arrival.earth_model_id = ResourceIdentifier(id=res_id)
        origin.arrivals.append(arrival)
        origin.quality.minimum_distance = min(
            d for d in (arrival.distance, origin.quality.minimum_distance)
            if d is not None)
            origin.quality.maximum_distance = max(
                d for d in (arrival.distance, origin.quality.maximum_distance)
                if d is not None)
        origin.quality.associated_phase_count += 1
        return pick, arrival
Example #37
def _detect(detector,
            st,
            threshold,
            trig_int,
            moveout=0,
            min_trig=0,
            process=True,
            extract_detections=False,
            cores=1):
    """
    Detect within continuous data using the subspace method.

    Not to be called directly, use the detector.detect method.

    :type detector: eqcorrscan.core.subspace.Detector
    :param detector: Detector to use.
    :type st: obspy.core.stream.Stream
    :param st: Un-processed stream to detect within using the subspace \
        detector
    :type threshold: float
    :param threshold: Threshold value for detections between 0-1
    :type trig_int: float
    :param trig_int: Minimum trigger interval in seconds.
    :type moveout: float
    :param moveout: Maximum allowable moveout window for non-multiplexed,
        network detection.  See note.
    :type min_trig: int
    :param min_trig: Minimum number of stations exceeding threshold for \
        non-multiplexed, network detection. See note.
    :type process: bool
    :param process: Whether or not to process the stream according to the \
        parameters defined by the detector.  Default is to process the \
        data (True).
    :type extract_detections: bool
    :param extract_detections: Whether to extract waveforms for each \
        detection or not, if true will return detections and streams.

    :return: list of detections
    :rtype: list of eqcorrscan.core.match_filter.Detection
    """
    detections = []
    # First process the stream
    if process:
        Logger.info('Processing Stream')
        stream, stachans = _subspace_process(
            streams=[st.copy()],
            lowcut=detector.lowcut,
            highcut=detector.highcut,
            filt_order=detector.filt_order,
            sampling_rate=detector.sampling_rate,
            multiplex=detector.multiplex,
            stachans=detector.stachans,
            parallel=True,
            align=False,
            shift_len=None,
            reject=False,
            cores=cores)
    else:
        # Check the sampling rate at the very least
        for tr in st:
            if not tr.stats.sampling_rate == detector.sampling_rate:
                raise ValueError('Sampling rates do not match.')
        stream = [st]
        stachans = detector.stachans
    outtic = default_timer()
    # If multiplexed, how many samples do we increment by?
    if detector.multiplex:
        Nc = len(detector.stachans)
    else:
        Nc = 1
    # Here do all ffts
    fft_vars = _do_ffts(detector, stream, Nc)
    Logger.info('Computing detection statistics')
    Logger.info('Preallocating stats matrix')
    stats = np.zeros(
        (len(stream[0]), (len(stream[0][0]) // Nc) - (fft_vars[4] // Nc) + 1))
    for det_freq, data_freq_sq, data_freq, i in zip(fft_vars[0], fft_vars[1],
                                                    fft_vars[2],
                                                    np.arange(len(stream[0]))):
        # Calculate det_statistic in frequency domain
        stats[i] = _det_stat_freq(det_freq, data_freq_sq, data_freq,
                                  fft_vars[3], Nc, fft_vars[4], fft_vars[5])
        Logger.info('Stats matrix is shape %s' % str(stats[i].shape))
    trig_int_samples = detector.sampling_rate * trig_int
    Logger.info('Finding peaks')
    peaks = []
    for i in range(len(stream[0])):
        peaks.append(
            findpeaks.find_peaks2_short(arr=stats[i],
                                        thresh=threshold,
                                        trig_int=trig_int_samples))
    if not detector.multiplex:
        # Conduct network coincidence triggering
        peaks = findpeaks.coin_trig(peaks=peaks,
                                    samp_rate=detector.sampling_rate,
                                    moveout=moveout,
                                    min_trig=min_trig,
                                    stachans=stachans,
                                    trig_int=trig_int)
    else:
        peaks = peaks[0]
    if len(peaks) > 0:
        for peak in peaks:
            detecttime = st[0].stats.starttime + \
                (peak[1] / detector.sampling_rate)
            rid = ResourceIdentifier(id=detector.name + '_' + str(detecttime),
                                     prefix='smi:local')
            ev = Event(resource_id=rid)
            cr_i = CreationInfo(author='EQcorrscan',
                                creation_time=UTCDateTime())
            ev.creation_info = cr_i
            # All detection info in Comments for lack of a better idea
            thresh_str = 'threshold=' + str(threshold)
            ccc_str = 'detect_val=' + str(peak[0])
            used_chans = 'channels used: ' +\
                ' '.join([str(pair) for pair in detector.stachans])
            ev.comments.append(Comment(text=thresh_str))
            ev.comments.append(Comment(text=ccc_str))
            ev.comments.append(Comment(text=used_chans))
            for stachan in detector.stachans:
                tr = st.select(station=stachan[0], channel=stachan[1])
                if tr:
                    net_code = tr[0].stats.network
                else:
                    net_code = ''
                pick_tm = detecttime
                wv_id = WaveformStreamID(network_code=net_code,
                                         station_code=stachan[0],
                                         channel_code=stachan[1])
                ev.picks.append(Pick(time=pick_tm, waveform_id=wv_id))
            detections.append(
                Detection(template_name=detector.name,
                          detect_time=detecttime,
                          no_chans=len(detector.stachans),
                          detect_val=peak[0],
                          threshold=threshold,
                          typeofdet='subspace',
                          threshold_type='abs',
                          threshold_input=threshold,
                          chans=detector.stachans,
                          event=ev))
    outtoc = default_timer()
    Logger.info('Detection took %s seconds' % str(outtoc - outtic))
    if extract_detections:
        detection_streams = extract_from_stream(st, detections)
        return detections, detection_streams
    return detections
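
# Hedged usage sketch: _detect is private; the supported entry point is
# Detector.detect, which forwards these arguments (values illustrative):
# detections = detector.detect(st=st, threshold=0.5, trig_int=2.0,
#                              extract_detections=False)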
Example #38
    def _parseRecordS(self, line, event, p_pick, p_arrival):
        """
        Parses the 'secondary phases' record S

        Secondary phases are following phases of the reading,
        and can be P-type or S-type.
        """
        arrivals = []
        phase = line[7:15].strip()
        arrival_time = line[15:24]
        if phase:
            arrivals.append((phase, arrival_time))
        phase = line[25:33].strip()
        arrival_time = line[33:42]
        if phase:
            arrivals.append((phase, arrival_time))
        phase = line[43:51].strip()
        arrival_time = line[51:60]
        if phase:
            arrivals.append((phase, arrival_time))

        evid = event.resource_id.id.split('/')[-1]
        station_string = \
            p_pick.waveform_id.getSEEDString()\
            .replace(' ', '-').replace('.', '_').lower()
        origin = event.origins[0]
        for phase, arrival_time in arrivals:
            if phase[0:2] == 'D=':
                # unused: depth = self._float(phase[2:7])
                try:
                    depth_usage_flag = phase[7]
                except IndexError:
                    # usage flag is not defined
                    depth_usage_flag = None
                # FIXME: I'm not sure that 'X' actually
                # means 'used'
                if depth_usage_flag == 'X':
                    # FIXME: is this enough to say that
                    # the event is constrained by depth phases?
                    origin.depth_type = 'constrained by depth phases'
                    origin.quality.depth_phase_count += 1
            else:
                pick = Pick()
                prefix = '/'.join(
                    (res_id_prefix, 'pick', evid, station_string))
                pick.resource_id = ResourceIdentifier(prefix=prefix)
                date = origin.time.strftime('%Y%m%d')
                pick.time = UTCDateTime(date + arrival_time)
                # Check if pick is on the next day:
                if pick.time < origin.time:
                    pick.time += timedelta(days=1)
                pick.waveform_id = p_pick.waveform_id
                pick.backazimuth = p_pick.backazimuth
                onset = phase[0]
                if onset == 'e':
                    pick.onset = 'emergent'
                    phase = phase[1:]
                elif onset == 'i':
                    pick.onset = 'impulsive'
                    phase = phase[1:]
                elif onset == 'q':
                    pick.onset = 'questionable'
                    phase = phase[1:]
                pick.phase_hint = phase.strip()
                event.picks.append(pick)
                arrival = Arrival()
                prefix = '/'.join(
                    (res_id_prefix, 'arrival', evid, station_string))
                arrival.resource_id = ResourceIdentifier(prefix=prefix)
                arrival.pick_id = pick.resource_id
                arrival.phase = pick.phase_hint
                arrival.azimuth = p_arrival.azimuth
                arrival.distance = p_arrival.distance
                origin.quality.associated_phase_count += 1
                origin.arrivals.append(arrival)
Example #39
0
def newResourceIdentifier(class_name):
    id_head = "/".join((ID_ROOT, class_name))
    return ResourceIdentifier(prefix=id_head)
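
A usage sketch, assuming ID_ROOT is a module-level namespace root such as
'smi:local' (hypothetical value):

ID_ROOT = 'smi:local'  # assumed; defined elsewhere in the original module
pick_id = newResourceIdentifier('pick')
# -> ResourceIdentifier with prefix 'smi:local/pick' and a unique suffix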
Example #40
0
# Assumed reconstruction: the snippet opens mid-statement; 'cat' is the
# catalog iterated below.
cat = read_events(
    '/media/chet/hdd/seismic/NZ/catalogs/2015_dets_nlloc/2015_dets_nlloc_Sherburn_no_dups.xml'
)

temp_dir = '/media/chet/hdd/seismic/NZ/templates/rotnga_2015/2015_det2cats/*'
# temp_dir = '/projects/nesi00228/data/templates/nlloc_reloc/dayproc_4-27/*'
temp_files = glob(temp_dir)
template_dict = {}
for filename in temp_files:
    # Note: rstrip('.mseed') strips any trailing '.', 'm', 's', 'e', 'd'
    # characters rather than the literal suffix, so drop the extension with
    # splitext instead. Self detections and dated detections both use the
    # bare file stem for the id.
    stem = os.path.splitext(filename.split('/')[-1])[0]
    uri = ResourceIdentifier('smi:local/' + stem)
    template_dict[uri] = read(filename)

# Correcting different naming of resource_id and wav filenames
cnt = 0
for ev in cat:
    if str(ev.resource_id).split('_')[-1] == 'self':
        cnt += 1
        print('This is a self detection: %d' % cnt)
        # Should theoretically be able to extract detect time from
        # first pick - template prepick time...
        det_time = min([pk.time for pk in ev.picks]) - 0.1
        wav_id = ResourceIdentifier(
            str(ev.resource_id).split('/')[-1].split('_')[0] + '_' +
            str(det_time))
        find_file = temp_dir.rstrip('*') + str(wav_id) + '.mseed'
        if os.path.isfile(find_file):
            pass  # remainder of this branch is truncated in the source snippet
Example #41
0
def surf_events_to_cat(loc_file, pick_file):
    """
    Take location files (hypoinverse formatted) and picks (format TBD)
    and creates a single obspy catalog for later use and dissemination.

    :param loc_file: Path to the hypoinverse-formatted location file
    :param pick_file: Path to the pick file
    :return: obspy.core.Catalog
    """
    # Read/parse location file and create Events for each
    surf_cat = Catalog()
    # Parse the pick file to a dictionary
    pick_dict = parse_picks(pick_file)
    with open(loc_file, 'r') as f:
        next(f)
        for ln in f:
            ln = ln.strip('\n')
            line = ln.split(',')
            eid = line[0]
            if eid not in pick_dict:
                print('No picks for this location, skipping for now.')
                continue
            ot = UTCDateTime(line[1])
            hmc_east = float(line[2])
            hmc_north = float(line[3])
            hmc_elev = float(line[4])
            gap = float(line[-5])
            rms = float(line[-3])
            errXY = float(line[-2])
            errZ = float(line[-1])
            converter = SURF_converter()
            lon, lat, elev = converter.to_lonlat((hmc_east, hmc_north,
                                                  hmc_elev))
            o = Origin(time=ot, longitude=lon, latitude=lat, depth=130 - elev)
            o.origin_uncertainty = OriginUncertainty()
            o.quality = OriginQuality()
            ou = o.origin_uncertainty
            oq = o.quality
            ou.horizontal_uncertainty = errXY * 1e3
            ou.preferred_description = "horizontal uncertainty"
            o.depth_errors.uncertainty = errZ * 1e3
            oq.standard_error = rms
            oq.azimuthal_gap = gap
            extra = AttribDict({
                'hmc_east': {
                    'value': hmc_east,
                    'namespace': 'smi:local/hmc'
                },
                'hmc_north': {
                    'value': hmc_north,
                    'namespace': 'smi:local/hmc'
                },
                'hmc_elev': {
                    'value': hmc_elev,
                    'namespace': 'smi:local/hmc'
                },
                'hmc_eid': {
                    'value': eid,
                    'namespace': 'smi:local/hmc'
                }
            })
            o.extra = extra
            rid = ResourceIdentifier(id=ot.strftime('%Y%m%d%H%M%S%f'))
            # Dummy magnitude of 1. for all events until further notice
            mag = Magnitude(mag=1., mag_errors=QuantityError(uncertainty=1.))
            ev = Event(origins=[o], magnitudes=[mag],
                       picks=pick_dict[eid], resource_id=rid)
            surf_cat.append(ev)
    return surf_cat
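
A usage sketch for the reader above (file paths hypothetical):

surf_cat = surf_events_to_cat('hmc_locations.csv', 'picks.txt')
surf_cat.write('surf_catalog.xml', format='QUAKEML')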
Example #42
0
 def cat1_bad_arrival_pick_id(self, cat1):
     cat = cat1.copy()
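     # A fresh ResourceIdentifier points at no pick, so the arrival's
     # pick_id set below is intentionally dangling.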
     rid = ResourceIdentifier()
     cat[0].origins[0].arrivals[0].pick_id = rid
     return cat
Example #43
0
    def _parseRecordE(self, line, event):
        """
        Parses the 'error and magnitude' record E
        """
        orig_time_stderr = self._float(line[2:7])
        latitude_stderr = self._float(line[8:14])
        longitude_stderr = self._float(line[15:21])
        depth_stderr = self._float(line[22:27])
        mb_mag = self._float(line[28:31])
        mb_nsta = self._int(line[32:35])
        Ms_mag = self._float(line[36:39])
        Ms_nsta = self._int(line[39:42])
        mag1 = self._float(line[42:45])
        mag1_type = line[45:47]
        mag1_source_code = line[47:51].strip()
        mag2 = self._float(line[51:54])
        mag2_type = line[54:56]
        mag2_source_code = line[56:60].strip()

        evid = event.resource_id.id.split('/')[-1]
        origin = event.origins[0]
        self._storeUncertainty(origin.time_errors, orig_time_stderr)
        self._storeUncertainty(origin.latitude_errors,
                               self._latErrToDeg(latitude_stderr))
        self._storeUncertainty(
            origin.longitude_errors,
            self._lonErrToDeg(longitude_stderr, origin.latitude))
        self._storeUncertainty(origin.depth_errors, depth_stderr, scale=1000)
        if mb_mag is not None:
            mag = Magnitude()
            res_id = '/'.join((res_id_prefix, 'magnitude', evid, 'mb'))
            mag.resource_id = ResourceIdentifier(id=res_id)
            mag.creation_info = CreationInfo(agency_id='USGS-NEIC')
            mag.mag = mb_mag
            mag.magnitude_type = 'Mb'
            mag.station_count = mb_nsta
            mag.origin_id = origin.resource_id
            event.magnitudes.append(mag)
        if Ms_mag is not None:
            mag = Magnitude()
            res_id = '/'.join((res_id_prefix, 'magnitude', evid, 'ms'))
            mag.resource_id = ResourceIdentifier(id=res_id)
            mag.creation_info = CreationInfo(agency_id='USGS-NEIC')
            mag.mag = Ms_mag
            mag.magnitude_type = 'Ms'
            mag.station_count = Ms_nsta
            mag.origin_id = origin.resource_id
            event.magnitudes.append(mag)
        if mag1 is not None:
            mag = Magnitude()
            mag1_id = mag1_type.lower()
            res_id = '/'.join((res_id_prefix, 'magnitude', evid, mag1_id))
            mag.resource_id = ResourceIdentifier(id=res_id)
            mag.creation_info = CreationInfo(agency_id=mag1_source_code)
            mag.mag = mag1
            mag.magnitude_type = mag1_type
            mag.origin_id = origin.resource_id
            event.magnitudes.append(mag)
        if mag2 is not None:
            mag = Magnitude()
            mag2_id = mag2_type.lower()
            # Guard: mag1_id is only defined when mag1 was present
            if mag1 is not None and mag2_id == mag1_id:
                mag2_id += '2'
            res_id = '/'.join((res_id_prefix, 'magnitude', evid, mag2_id))
            mag.resource_id = ResourceIdentifier(id=res_id)
            mag.creation_info = CreationInfo(agency_id=mag2_source_code)
            mag.mag = mag2
            mag.magnitude_type = mag2_type
            mag.origin_id = origin.resource_id
            event.magnitudes.append(mag)
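
The magnitude ids built above can be matched later because ResourceIdentifier
equality compares the id string; a minimal sketch (prefix and event id are
hypothetical):

from obspy.core.event import ResourceIdentifier

res_id = '/'.join(('quakeml:us.anss.org', 'magnitude', 'evid123', 'mb'))
assert ResourceIdentifier(id=res_id) == ResourceIdentifier(id=res_id)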
Example #44
0
 def test_focalmechanism(self):
     """
     Tests FocalMechanism object.
     """
     self.assertEqual(len(self.catalog[0].focal_mechanisms), 4)
     fm = self.catalog[0].focal_mechanisms[0]
     self.assertEqual(
         fm.resource_id,
         ResourceIdentifier(id='quakeml:us.anss.org/focalmechanism/'
                            '20120101052755.98/ucmt/mwc'))
     # general
     self.assertEqual(fm.waveform_id, [])
     self.assertEqual(fm.triggering_origin_id, None)
     self.assertEqual(fm.azimuthal_gap, None)
     self.assertEqual(fm.station_polarity_count, None)
     self.assertEqual(fm.misfit, None)
     self.assertEqual(fm.station_distribution_ratio, None)
     self.assertEqual(
         fm.method_id,
         ResourceIdentifier(id='quakeml:us.anss.org/methodID=CMT'))
     # comments
     self.assertEqual(len(fm.comments), 0)
     # creation info
     self.assertEqual(fm.creation_info.author, None)
     self.assertEqual(fm.creation_info.agency_id, 'UCMT')
     self.assertEqual(fm.creation_info.author_uri, None)
     self.assertEqual(fm.creation_info.agency_uri, None)
     self.assertEqual(fm.creation_info.creation_time, None)
     self.assertEqual(fm.creation_info.version, None)
     # nodalPlanes
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_1.strike, 5.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_1.dip, 85.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_1.rake, -76.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_2.strike, 116.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_2.dip, 15.0)
     self.assertAlmostEqual(fm.nodal_planes.nodal_plane_2.rake, -159.0)
     self.assertEqual(fm.nodal_planes.preferred_plane, None)
     # principalAxes
     self.assertAlmostEqual(fm.principal_axes.t_axis.azimuth, 82.0)
     self.assertAlmostEqual(fm.principal_axes.t_axis.plunge, 38.0)
     self.assertAlmostEqual(fm.principal_axes.t_axis.length, 1.87e+19)
     self.assertAlmostEqual(fm.principal_axes.p_axis.azimuth, 290.0)
     self.assertAlmostEqual(fm.principal_axes.p_axis.plunge, 49.0)
     self.assertAlmostEqual(fm.principal_axes.p_axis.length, -1.87e+19)
     self.assertEqual(fm.principal_axes.n_axis.azimuth, 184)
     self.assertEqual(fm.principal_axes.n_axis.plunge, 14)
     self.assertEqual(fm.principal_axes.n_axis.length, 0.0)
     # momentTensor
     mt = fm.moment_tensor
     self.assertEqual(
         mt.resource_id,
         ResourceIdentifier(id='quakeml:us.anss.org/momenttensor/'
                            '20120101052755.98/ucmt/mwc'))
     self.assertAlmostEqual(mt.scalar_moment, 1.9e+19)
     self.assertAlmostEqual(mt.tensor.m_rr, -3.4e+18)
     self.assertAlmostEqual(mt.tensor.m_tt, -8e+17)
     self.assertAlmostEqual(mt.tensor.m_pp, 4.2e+18)
     self.assertAlmostEqual(mt.tensor.m_rt, -1.9e+18)
     self.assertAlmostEqual(mt.tensor.m_rp, -1.77e+19)
     self.assertAlmostEqual(mt.tensor.m_tp, -4.2e+18)
     self.assertEqual(mt.clvd, None)
Example #45
0
    def _parseRecordDp(self, line, event):
        """
        Parses the 'source parameter data - primary' record Dp
        """
        source_contributor = line[2:6].strip()
        computation_type = line[6]
        exponent = self._intZero(line[7])
        scale = math.pow(10, exponent)
        centroid_origin_time = line[8:14] + '.' + line[14]
        orig_time_stderr = line[15:17]
        if orig_time_stderr == 'FX':
            orig_time_stderr = 'Fixed'
        else:
            orig_time_stderr = \
                self._floatWithFormat(orig_time_stderr, '2.1', scale)
        centroid_latitude = self._floatWithFormat(line[17:21], '4.2')
        lat_type = line[21]
        if centroid_latitude is not None:
            centroid_latitude *= self._coordinateSign(lat_type)
        lat_stderr = line[22:25]
        if lat_stderr == 'FX':
            lat_stderr = 'Fixed'
        else:
            lat_stderr = self._floatWithFormat(lat_stderr, '3.2', scale)
        centroid_longitude = self._floatWithFormat(line[25:30], '5.2')
        lon_type = line[30]
        if centroid_longitude is not None:
            centroid_longitude *= self._coordinateSign(lon_type)
        lon_stderr = line[31:34]
        if lon_stderr == 'FX':
            lon_stderr = 'Fixed'
        else:
            lon_stderr = self._floatWithFormat(lon_stderr, '3.2', scale)
        centroid_depth = self._floatWithFormat(line[34:38], '4.1')
        depth_stderr = line[38:40]
        if depth_stderr == 'FX' or depth_stderr == 'BD':
            depth_stderr = 'Fixed'
        else:
            depth_stderr = self._floatWithFormat(depth_stderr, '2.1', scale)
        station_number = self._intZero(line[40:43])
        component_number = self._intZero(line[43:46])
        station_number2 = self._intZero(line[46:48])
        component_number2 = self._intZero(line[48:51])
        # unused: half_duration = self._floatWithFormat(line[51:54], '3.1')
        moment = self._floatWithFormat(line[54:56], '2.1')
        moment_stderr = self._floatWithFormat(line[56:58], '2.1')
        moment_exponent = self._int(line[58:60])
        if (moment is not None) and (moment_exponent is not None):
            moment *= math.pow(10, moment_exponent)
        if (moment_stderr is not None) and (moment_exponent is not None):
            moment_stderr *= math.pow(10, moment_exponent)

        evid = event.resource_id.id.split('/')[-1]
        # Create a new origin only if centroid time is defined:
        origin = None
        if centroid_origin_time.strip() != '.':
            origin = Origin()
            res_id = '/'.join(
                (res_id_prefix, 'origin', evid, source_contributor.lower(),
                 'mw' + computation_type.lower()))
            origin.resource_id = ResourceIdentifier(id=res_id)
            origin.creation_info = \
                CreationInfo(agency_id=source_contributor)
            date = event.origins[0].time.strftime('%Y%m%d')
            origin.time = UTCDateTime(date + centroid_origin_time)
            # Check if centroid time is on the next day:
            if origin.time < event.origins[0].time:
                origin.time += timedelta(days=1)
            self._storeUncertainty(origin.time_errors, orig_time_stderr)
            origin.latitude = centroid_latitude
            origin.longitude = centroid_longitude
            origin.depth = centroid_depth * 1000
            if lat_stderr == 'Fixed' and lon_stderr == 'Fixed':
                origin.epicenter_fixed = True
            else:
                self._storeUncertainty(origin.latitude_errors,
                                       self._latErrToDeg(lat_stderr))
                self._storeUncertainty(
                    origin.longitude_errors,
                    self._lonErrToDeg(lon_stderr, origin.latitude))
            if depth_stderr == 'Fixed':
                origin.depth_type = 'operator assigned'
            else:
                origin.depth_type = 'from location'
                self._storeUncertainty(origin.depth_errors,
                                       depth_stderr,
                                       scale=1000)
            quality = OriginQuality()
            quality.used_station_count = \
                station_number + station_number2
            quality.used_phase_count = \
                component_number + component_number2
            origin.quality = quality
            origin.type = 'centroid'
            event.origins.append(origin)
        focal_mechanism = FocalMechanism()
        res_id = '/'.join(
            (res_id_prefix, 'focalmechanism', evid, source_contributor.lower(),
             'mw' + computation_type.lower()))
        focal_mechanism.resource_id = ResourceIdentifier(id=res_id)
        focal_mechanism.creation_info = \
            CreationInfo(agency_id=source_contributor)
        moment_tensor = MomentTensor()
        if origin is not None:
            moment_tensor.derived_origin_id = origin.resource_id
        else:
            # this is required for QuakeML validation:
            res_id = '/'.join((res_id_prefix, 'no-origin'))
            moment_tensor.derived_origin_id = \
                ResourceIdentifier(id=res_id)
        for mag in event.magnitudes:
            if mag.creation_info.agency_id == source_contributor:
                moment_tensor.moment_magnitude_id = mag.resource_id
        res_id = '/'.join(
            (res_id_prefix, 'momenttensor', evid, source_contributor.lower(),
             'mw' + computation_type.lower()))
        moment_tensor.resource_id = ResourceIdentifier(id=res_id)
        moment_tensor.scalar_moment = moment
        self._storeUncertainty(moment_tensor.scalar_moment_errors,
                               moment_stderr)
        data_used = DataUsed()
        data_used.station_count = station_number + station_number2
        data_used.component_count = component_number + component_number2
        if computation_type == 'C':
            res_id = '/'.join((res_id_prefix, 'methodID=CMT'))
            focal_mechanism.method_id = ResourceIdentifier(id=res_id)
            # CMT algorithm uses long-period body waves,
            # very-long-period surface waves and
            # intermediate period surface waves (since 2004
            # for shallow and intermediate-depth earthquakes
            # --Ekstrom et al., 2012)
            data_used.wave_type = 'combined'
        elif computation_type == 'M':
            res_id = '/'.join((res_id_prefix, 'methodID=moment_tensor'))
            focal_mechanism.method_id = ResourceIdentifier(id=res_id)
            # FIXME: not sure which kind of data is used by
            # "moment tensor" algorithm.
            data_used.wave_type = 'unknown'
        elif computation_type == 'B':
            res_id = '/'.join((res_id_prefix, 'methodID=broadband_data'))
            focal_mechanism.method_id = ResourceIdentifier(id=res_id)
            # FIXME: is 'combined' correct here?
            data_used.wave_type = 'combined'
        elif computation_type == 'F':
            res_id = '/'.join((res_id_prefix, 'methodID=P-wave_first_motion'))
            focal_mechanism.method_id = ResourceIdentifier(id=res_id)
            data_used.wave_type = 'P waves'
        elif computation_type == 'S':
            res_id = '/'.join((res_id_prefix, 'methodID=scalar_moment'))
            focal_mechanism.method_id = ResourceIdentifier(id=res_id)
            # FIXME: not sure which kind of data is used
            # for scalar moment determination.
            data_used.wave_type = 'unknown'
        moment_tensor.data_used = [data_used]
        focal_mechanism.moment_tensor = moment_tensor
        event.focal_mechanisms.append(focal_mechanism)
        return focal_mechanism
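
The chain of computation_type branches above could also be table-driven; a
sketch under the same method-id scheme (res_id_prefix value hypothetical):

from obspy.core.event import ResourceIdentifier

res_id_prefix = 'quakeml:us.anss.org'  # assumed prefix
METHODS = {
    'C': ('methodID=CMT', 'combined'),
    'M': ('methodID=moment_tensor', 'unknown'),
    'B': ('methodID=broadband_data', 'combined'),
    'F': ('methodID=P-wave_first_motion', 'P waves'),
    'S': ('methodID=scalar_moment', 'unknown'),
}
method, wave_type = METHODS['C']
method_id = ResourceIdentifier(id='/'.join((res_id_prefix, method)))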
Example #46
0
def _read_evt(filename, encoding='utf-8', **kwargs):
    """
    Read a SeismicHandler EVT file and returns an ObsPy Catalog object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.event.read_events` function, call this
        instead.

    :param str encoding: encoding used (default: utf-8)

    :rtype: :class:`~obspy.core.event.Catalog`
    :return: An ObsPy Catalog object.

    .. note::
        The following fields are supported by this function: %s.

        Compare with http://www.seismic-handler.org/wiki/ShmDocFileEvt
    """
    with io.open(filename, 'r', encoding=encoding) as f:
        temp = f.read()
    # first create phases and phases_o dictionaries for different phases
    # and phases with origin information
    phases = defaultdict(list)
    phases_o = {}
    phase = {}
    evid = None
    for line in temp.splitlines():
        if 'End of Phase' in line:
            if 'origin time' in phase:
                if evid in phases_o:
                    # more than one origin found; the last one wins
                    pass
                phases_o[evid] = phase
            phases[evid].append(phase)
            phase = {}
            evid = None
        elif line.strip() != '':
            try:
                key, value = line.split(':', 1)
            except ValueError:
                continue
            key = key.strip().lower()
            value = value.strip()
            if key == 'event id':
                evid = value
            elif value != '':
                phase[key] = value
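    # A well-formed file closes every block with 'End of Phase', which
    # resets evid, so it must be None once the loop finishes.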
    assert evid is None

    # now create obspy Events from phases and phases_o dictionaries
    events = []
    for evid in phases:
        picks = []
        arrivals = []
        stamags = []
        origins = []
        po = None
        magnitudes = []
        pm = None
        for p in phases[evid]:
            sta = p.get('station code', '')
            comp = p.get('component', '')
            pick_kwargs = _kw(p, 'pick')
            widargs = _resolve_seedid(sta,
                                      comp,
                                      time=pick_kwargs['time'],
                                      **kwargs)
            wid = WaveformStreamID(*widargs)
            pick = Pick(waveform_id=wid, **pick_kwargs)
            arrival = Arrival(pick_id=pick.resource_id, **_kw(p, 'arrival'))
            picks.append(pick)
            arrivals.append(arrival)
            stamags_temp, _ = _mags(p, evid, stamag=True, wid=wid)
            stamags.extend(stamags_temp)
        if evid in phases_o:
            o = phases_o[evid]
            uncertainty = OriginUncertainty(**_kw(o, 'origin_uncertainty'))
            origin = Origin(arrivals=arrivals,
                            origin_uncertainty=uncertainty,
                            **_kw(o, 'origin'))
            if origin.latitude is None or origin.longitude is None:
                warn('latitude or longitude not set for event %s' % evid)
            else:
                if origin.longitude_errors.uncertainty is not None:
                    origin.longitude_errors.uncertainty *= cos(
                        origin.latitude / 180 * pi)
                origins = [origin]
                po = origin.resource_id
            magnitudes, pm = _mags(o, evid)
        else:
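            # No origin block for this event: fall back to the last parsed
            # phase dict for any event-level keys.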
            o = p
        event = Event(resource_id=ResourceIdentifier(evid),
                      picks=picks,
                      origins=origins,
                      magnitudes=magnitudes,
                      station_magnitudes=stamags,
                      preferred_origin_id=po,
                      preferred_magnitude_id=pm,
                      **_kw(o, 'event'))
        events.append(event)
    return Catalog(events,
                   description='Created from SeismicHandler EVT format')
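
As the docstring notes, this reader registers with ObsPy's read_events; a
minimal usage sketch (file name hypothetical):

from obspy import read_events

cat = read_events('example.evt')  # format auto-detected
print(cat)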