Example #1
 def __setattr__(self, name, value):
     """
     Custom property implementation that works if the class inherits
     from AttribDict.
     """
     # Pass to the parent method if not a custom property.
     if name not in self._property_dict.keys():
         AttribDict.__setattr__(self, name, value)
         return
     attrib_type = self._property_dict[name]
     # If the value is None or already the correct type just set it.
     if (value is not None) and (type(value) is not attrib_type):
         # If it is a dict and attrib_type is not a dict, then all
         # values will be assumed to be keyword arguments.
         if isinstance(value, dict):
             new_value = attrib_type(**value)
         else:
             new_value = attrib_type(value)
         if new_value is None:
             msg = 'Setting attribute "%s" failed. ' % (name)
             msg += 'Value "%s" could not be converted to type "%s"' % \
                 (str(value), str(attrib_type))
             raise ValueError(msg)
         value = new_value
     AttribDict.__setattr__(self, name, value)
     # If "name" is resource_id and value is not None, set the referred
     # object of the ResourceIdentifier to self.
     if name == "resource_id" and value is not None:
         self.resource_id.set_referred_object(self)
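
A minimal, hypothetical sketch of the same coercion pattern in an isolated subclass (TypedAttribDict and its _property_dict are illustrative, not actual ObsPy classes):

from obspy import UTCDateTime
from obspy.core.util import AttribDict

class TypedAttribDict(AttribDict):
    # illustrative subclass with a single typed property
    _property_dict = {'time': UTCDateTime}

    def __setattr__(self, name, value):
        attrib_type = self._property_dict.get(name)
        if attrib_type is not None and value is not None \
                and type(value) is not attrib_type:
            # dicts are unpacked as keyword arguments, anything else is
            # passed to the target type's constructor
            value = attrib_type(**value) if isinstance(value, dict) \
                else attrib_type(value)
        AttribDict.__setattr__(self, name, value)

ad = TypedAttribDict()
ad.time = "2012-01-01T00:00:00"          # plain string is coerced...
assert isinstance(ad.time, UTCDateTime)  # ...to a UTCDateTime instance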
Example #2
def _parse_hypo2000_file(hypo_file):
    hypo = AttribDict()
    picks = list()
    hypo_line = False
    station_line = False
    oldpick = None
    for line in open(hypo_file):
        word = line.split()
        if not word:
            continue
        if hypo_line:
            hypo = _parse_hypo2000_hypo_line(line)
            evid = os.path.basename(hypo_file)
            evid = evid.replace('.txt', '')
            hypo.evid = evid
        if station_line:
            try:
                pick = _parse_hypo2000_station_line(
                    line, oldpick, hypo.orig_time)
                oldpick = pick
                picks.append(pick)
            except Exception:
                continue
        if word[0] == 'YEAR':
            hypo_line = True
            continue
        hypo_line = False
        if word[0] == 'STA':
            station_line = True
    if not hypo:
        raise TypeError('Could not find hypocenter data.')
    return hypo, picks
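
A minimal usage sketch ('event1.txt' stands in for a real hypo2000 output file):

hypo, picks = _parse_hypo2000_file('event1.txt')
print(hypo.evid)    # event id derived from the file name ('event1')
print(len(picks))   # one pick per parsed station line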
Example #3
 def test_get_stations(self):
     """
     """
     # initialize client
     client = Client(user='******')
     # example 1
     start = UTCDateTime(2008, 1, 1)
     end = start + 1
     result = client.get_stations(start, end, 'BW')
     self.assertTrue(
         AttribDict({'remark': '', 'code': 'RWMO', 'elevation': 763.0,
                     'description': 'Wildenmoos, Bavaria, BW-Net',
                     'start': UTCDateTime(2006, 7, 4, 0, 0),
                     'restricted': False, 'archive_net': '',
                     'longitude': 12.729887, 'affiliation': 'BayernNetz',
                     'depth': None, 'place': 'Wildenmoos',
                     'country': 'BW-Net', 'latitude': 47.744171,
                     'end': None}) in result)
     # example 2
     expected = AttribDict(
         {'code': 'WDD', 'description': 'Wied Dalam',
          'affiliation': '', 'country': '', 'place': '', 'remark': '',
          'restricted': False, 'archive_net': '',
          'latitude': 35.8373, 'longitude': 14.5242,
          'elevation': 44.0, 'depth': None,
          'start': UTCDateTime(1995, 7, 6, 0, 0), 'end': None})
     # routing default
     result = client.get_stations(start, end, 'MN')
     self.assertTrue(expected in result)
     # w/o routing
     result = client.get_stations(start, end, 'MN', route=False)
     self.assertTrue(expected in result)
     # w/ routing
     result = client.get_stations(start, end, 'MN', route=True)
     self.assertTrue(expected in result)
Example #4
(identical to the __setattr__ implementation in Example #1)
Example #5
def read_input_file(filename):
    """
    Parses an SW4 input file to a nested
    :class:`obspy.core.util.attribdict.AttribDict` / list /
    :class:`obspy.core.util.attribdict.AttribDict` structure.

    :type filename: str
    :param filename: Filename (potentially with relative/absolute path) of SW4
        config/input file.
    :rtype: :class:`obspy.core.util.attribdict.AttribDict`
    :returns: Parsed SW4 simulation input/config file.
    """

    config = AttribDict()
    with open(filename) as fh:
        for line in fh:
            # get rid of comments
            line = line.split("#")[0].strip()
            if not line:
                continue
            line = line.split()
            config_category = config.setdefault(line.pop(0), [])
            config_item = AttribDict()
            for item in line:
                key, value = item.split("=", 1)
                config_item[key] = _decode_string_value(value)
            config_category.append(config_item)
    return config
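
A short usage sketch with made-up SW4 input content written to a temporary file; only the keyword/key=value layout matters here:

import os
import tempfile

content = "source x=5000 y=4000 z=0 m0=1e15  # a made-up point source\n"
with tempfile.NamedTemporaryFile('w', suffix='.sw4input',
                                 delete=False) as f:
    f.write(content)
config = read_input_file(f.name)
os.remove(f.name)
# nested access: category -> list of AttribDicts -> decoded values
print(config.source[0].x)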
Example #6
 def test_setitem(self):
     """
     Tests __setitem__ method of AttribDict class.
     """
     # 1
     ad = AttribDict()
     ad['test'] = 'NEW'
     self.assertEqual(ad['test'], 'NEW')
     self.assertEqual(ad.test, 'NEW')
     self.assertEqual(ad.get('test'), 'NEW')
     self.assertEqual(ad.__getattr__('test'), 'NEW')
     self.assertEqual(ad.__getitem__('test'), 'NEW')
     self.assertEqual(ad.__dict__['test'], 'NEW')
     self.assertEqual(ad.__dict__.get('test'), 'NEW')
     self.assertTrue('test' in ad)
     self.assertTrue('test' in ad.__dict__)
     # 2
     ad = AttribDict()
     ad.__setitem__('test', 'NEW')
     self.assertEqual(ad['test'], 'NEW')
     self.assertEqual(ad.test, 'NEW')
     self.assertEqual(ad.get('test'), 'NEW')
     self.assertEqual(ad.__getattr__('test'), 'NEW')
     self.assertEqual(ad.__getitem__('test'), 'NEW')
     self.assertEqual(ad.__dict__['test'], 'NEW')
     self.assertEqual(ad.__dict__.get('test'), 'NEW')
     self.assertTrue('test' in ad)
     self.assertTrue('test' in ad.__dict__)
Example #7
def rfstats(stats=None, event=None, station=None, stream=None,
            phase='P', dist_range=None):
    """
    Calculate ray specific values like slowness for given event and station.

    :param stats: stats object with event and/or station attributes. Can be
        None if both event and station are given.
    :param event: ObsPy :class:`~obspy.core.event.Event` object
    :param station: station object with attributes latitude, longitude and
        elevation
    :param stream: If a stream is given, stats has to be None. In this case
        rfstats will be called for every stats object in the stream.
    :param phase: string with phase to look for in result of
        :func:`~obspy.taup.taup.getTravelTimes`. Usually this will be 'P' or
        'S' for P and S receiver functions, respectively.
    :type dist_range: tuple of length 2
    :param dist_range: if the epicentral distance of the event is not in
        this interval, None is returned by this function,\n
        if phase == 'P' defaults to (30, 90),\n
        if phase == 'S' defaults to (50, 85)

    :return: ``stats`` object with event and station attributes, distance,
        back_azimuth, inclination, onset and slowness or None if epicentral
        distance is not in the given interval
    """
    if stream is not None:
        assert stats is None
        for tr in stream:
            rfstats(tr.stats, event, station, None, phase, dist_range)
        return
    phase = phase.upper()
    if dist_range is None and phase in 'PS':
        dist_range = (30, 90) if phase == 'P' else (50, 85)
    if stats is None:
        stats = AttribDict({})
    stats.update(obj2stats(event=event, station=station))
    dist, baz, _ = gps2DistAzimuth(stats.station_latitude,
                                   stats.station_longitude,
                                   stats.event_latitude,
                                   stats.event_longitude)
    dist = kilometer2degrees(dist / 1000)
    if dist_range and not dist_range[0] <= dist <= dist_range[1]:
        return
    tts = getTravelTimes(dist, stats.event_depth)
    tts2 = getTravelTimes(dist, 0)
    tts = [tt for tt in tts if tt['phase_name'] == phase]
    tts2 = [tt for tt in tts2 if tt['phase_name'] == phase]
    if len(tts) == 0 or len(tts2) == 0:
        raise Exception('Taup does not return phase %s at event distance %s' %
                        (phase, dist))
    onset = stats.event_time + tts[0]['time']
    inc = tts2[0]['take-off angle']  # approximation
    v = 5.8 if 'P' in phase else 3.36  # iasp91
    slowness = 6371. * sin(pi / 180. * inc) / v / 180 * pi
    stats.update({'distance': dist, 'back_azimuth': baz, 'inclination': inc,
                  'onset': onset, 'slowness': slowness})
    return stats
Example #8
def _get_beamforming_example_stream():
    # Load data
    from obspy import read
    from obspy.core.util import AttribDict
    from obspy.signal.invsim import corn_freq_2_paz
    st = read("https://examples.obspy.org/agfa.mseed")
    # Set PAZ and coordinates for all 5 channels
    st[0].stats.paz = AttribDict({
        'poles': [(-0.03736 - 0.03617j), (-0.03736 + 0.03617j)],
        'zeros': [0j, 0j],
        'sensitivity': 205479446.68601453,
        'gain': 1.0})
    st[0].stats.coordinates = AttribDict({
        'latitude': 48.108589,
        'elevation': 0.450000,
        'longitude': 11.582967})
    st[1].stats.paz = AttribDict({
        'poles': [(-0.03736 - 0.03617j), (-0.03736 + 0.03617j)],
        'zeros': [0j, 0j],
        'sensitivity': 205479446.68601453,
        'gain': 1.0})
    st[1].stats.coordinates = AttribDict({
        'latitude': 48.108192,
        'elevation': 0.450000,
        'longitude': 11.583120})
    st[2].stats.paz = AttribDict({
        'poles': [(-0.03736 - 0.03617j), (-0.03736 + 0.03617j)],
        'zeros': [0j, 0j],
        'sensitivity': 250000000.0,
        'gain': 1.0})
    st[2].stats.coordinates = AttribDict({
        'latitude': 48.108692,
        'elevation': 0.450000,
        'longitude': 11.583414})
    st[3].stats.paz = AttribDict({
        'poles': [(-4.39823 + 4.48709j), (-4.39823 - 4.48709j)],
        'zeros': [0j, 0j],
        'sensitivity': 222222228.10910088,
        'gain': 1.0})
    st[3].stats.coordinates = AttribDict({
        'latitude': 48.108456,
        'elevation': 0.450000,
        'longitude': 11.583049})
    st[4].stats.paz = AttribDict({
        'poles': [(-4.39823 + 4.48709j), (-4.39823 - 4.48709j), (-2.105 + 0j)],
        'zeros': [0j, 0j, 0j],
        'sensitivity': 222222228.10910088,
        'gain': 1.0})
    st[4].stats.coordinates = AttribDict({
        'latitude': 48.108730,
        'elevation': 0.450000,
        'longitude': 11.583157})
    # Instrument correction to 1Hz corner frequency
    paz1hz = corn_freq_2_paz(1.0, damp=0.707)
    st.simulate(paz_remove='self', paz_simulate=paz1hz)
    return st
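
A quick check of the resulting stream (requires network access to examples.obspy.org):

st = _get_beamforming_example_stream()
print(len(st))                           # 5 traces
print(st[0].stats.coordinates.latitude)  # 48.108589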
Example #9
 def state(self):
     # directory where the test files are located
     out = AttribDict()
     out.path = PATH
     out.path_images = os.path.join(PATH, os.pardir, "images")
     # some pre-computed ppsd used for plotting tests:
     # (ppsd._psd_periods was downcast to np.float16 to save space)
     out.example_ppsd_npz = os.path.join(PATH, "ppsd_kw1_ehz.npz")
     # ignore some "RuntimeWarning: underflow encountered in multiply"
     return out
Example #10
def _get_plot_starttime(event: Event, st: Stream) -> UTCDateTime:
    """Get starttime of a plot given an event and a stream."""
    try:
        attribute_with_time = event.preferred_origin() or event.origins[0]
    except (AttributeError, IndexError):
        try:
            attribute_with_time = AttribDict(
                {"time": min([p.time for p in event.picks]) - 5})
        except ValueError:
            attribute_with_time = AttribDict(
                {"time": min([tr.stats.starttime for tr in st])})
    return attribute_with_time.time
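
A minimal sketch (made-up event with a single pick and no origins) exercising the pick-based fallback:

from obspy import Stream, UTCDateTime
from obspy.core.event import Event, Pick

ev = Event(picks=[Pick(time=UTCDateTime(2020, 1, 1, 0, 0, 10))])
# no origin available, so the earliest pick time minus 5 s is used
print(_get_plot_starttime(ev, Stream()))  # 2020-01-01T00:00:05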
Example #11
 def test_init(self):
     """
     Tests initialization of AttribDict class.
     """
     ad = AttribDict({'test': 'NEW'})
     self.assertEqual(ad['test'], 'NEW')
     self.assertEqual(ad.test, 'NEW')
     self.assertEqual(ad.get('test'), 'NEW')
     self.assertEqual(ad.__getattr__('test'), 'NEW')
     self.assertEqual(ad.__getitem__('test'), 'NEW')
     self.assertEqual(ad.__dict__['test'], 'NEW')
     self.assertEqual(ad.__dict__.get('test'), 'NEW')
     self.assertTrue('test' in ad)
     self.assertTrue('test' in ad.__dict__)
Example #12
 def test_popitem(self):
     """
     Tests popitem method of AttribDict class.
     """
     ad = AttribDict()
     ad['test2'] = 'test'
     # removing via popitem
     temp = ad.popitem()
     self.assertEqual(temp, ('test2', 'test'))
     self.assertFalse('test2' in ad)
     self.assertFalse('test2' in ad.__dict__)
     self.assertFalse(hasattr(ad, 'test2'))
     # popitem for empty AttribDict raises a KeyError
     self.assertRaises(KeyError, ad.popitem)
Example #13
        def __setattr__(self, name, value):
            """
            Custom property implementation that works if the class inherits
            from AttribDict.
            """
            # avoid type casting of 'extra' attribute, to make it possible to
            # control ordering of extra tags by using an OrderedDict for
            # 'extra'.
            if name == 'extra':
                dict.__setattr__(self, name, value)
                return
            # Pass to the parent method if not a custom property.
            if name not in self._property_dict.keys():
                AttribDict.__setattr__(self, name, value)
                return
            attrib_type = self._property_dict[name]
            # If the value is None or already the correct type just set it.
            if (value is not None) and (type(value) is not attrib_type):
                # If it is a dict and attrib_type is not a dict, then all
                # values will be assumed to be keyword arguments.
                if isinstance(value, dict):
                    new_value = attrib_type(**value)
                else:
                    new_value = attrib_type(value)
                if new_value is None:
                    msg = 'Setting attribute "%s" failed. ' % (name)
                    msg += 'Value "%s" could not be converted to type "%s"' % \
                        (str(value), str(attrib_type))
                    raise ValueError(msg)
                value = new_value

            # Make sure all floats are finite - otherwise this is most
            # likely a user error.
            if attrib_type is float and value is not None:
                if not np.isfinite(value):
                    msg = "On %s object: Value '%s' for '%s' is " \
                          "not a finite floating point value." % (
                              type(self).__name__, str(value), name)

                    raise ValueError(msg)

            AttribDict.__setattr__(self, name, value)
            # if value is a resource id bind or unbind the resource_id
            if isinstance(value, ResourceIdentifier):
                if name == "resource_id":  # bind the resource_id to self
                    self.resource_id.set_referred_object(self, warn=False)
                else:  # else unbind to allow event scoping later
                    value._parent_key = None
Example #14
 def test_nested_stats(self):
     """
     Various setter and getter tests.
     """
     # 1
     stats = Stats()
     stats.test = dict()
     stats.test['test2'] = 'muh'
     assert stats.test.test2 == 'muh'
     assert stats.test['test2'] == 'muh'
     assert stats['test'].test2 == 'muh'
     assert stats['test']['test2'] == 'muh'
     stats.test['test2'] = 'maeh'
     assert stats.test.test2 == 'maeh'
     assert stats.test['test2'] == 'maeh'
     assert stats['test'].test2 == 'maeh'
     assert stats['test']['test2'] == 'maeh'
     # 2 - multiple initialization
     stats = Stats({'muh': 'meah'})
     stats2 = Stats(Stats(Stats(stats)))
     assert stats2.muh == 'meah'
     # 3 - check conversion to AttribDict
     stats = Stats()
     stats.sub1 = {'muh': 'meah'}
     stats.sub2 = AttribDict({'muh2': 'meah2'})
     stats2 = Stats(stats)
     assert isinstance(stats.sub1, AttribDict)
     assert isinstance(stats.sub2, AttribDict)
     assert stats2.sub1.muh == 'meah'
     assert stats2.sub2.muh2 == 'meah2'
Example #15
 def test_deepcopy(self):
     """
     Tests deepcopy method of Stats object.
     """
     stats = Stats()
     stats.network = 'BW'
     stats['station'] = 'ROTZ'
     stats['other1'] = {'test1': '1'}
     stats['other2'] = AttribDict({'test2': '2'})
     stats['other3'] = 'test3'
     stats2 = copy.deepcopy(stats)
     stats.network = 'CZ'
     stats.station = 'RJOB'
     assert stats2.__class__ == Stats
     assert stats2.network == 'BW'
     assert stats2.station == 'ROTZ'
     assert stats2.other1.test1 == '1'
     assert stats2.other1.__class__ == AttribDict
     assert len(stats2.other1) == 1
     assert stats2.other2.test2 == '2'
     assert stats2.other2.__class__ == AttribDict
     assert len(stats2.other2) == 1
     assert stats2.other3 == 'test3'
     assert stats.network == 'CZ'
     assert stats.station == 'RJOB'
Example #16
def get_data(station, location, component,
             tstart, tend, new=False,
             decimate=False):
    """
    Download the data and insert station location coordinates.
    """
    scl = '.'.join((station, location, component))
    fout = os.path.join('/tmp', '_'.join((scl, str(tstart), str(tend))))
    if os.path.isfile(fout) and new is False:
        with open(fout, 'rb') as fh:
            tr = pickle.load(fh)
            return tr
    else:
        try:
            st, inv = GeoNetFDSNrequest(tstart, tend, 'NZ', station,
                                        location, component)
        except FDSNException:
            return None
        st.remove_sensitivity()
        st.merge(method=1, fill_value=0.)
        tr = st[0]
        tr.trim(tstart, tend)
        tr.data -= tr.data.mean()
        if decimate:
            if int(round(tr.stats.sampling_rate, 0)) == 100:
                tr.data -= tr.data.mean()
                tr.taper(0.05)
                tr.decimate(10)
                tr.decimate(10)
        _s = inv[0][0]
        tr.stats.coordinates = AttribDict({'latitude': _s.latitude,
                                           'longitude': _s.longitude})
        with open(fout, 'wb') as fh:
            pickle.dump(tr, fh)
        return tr
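
Hypothetical usage (requires GeoNet FDSN access and the GeoNetFDSNrequest helper from the surrounding module; the station and channel codes are illustrative):

from obspy import UTCDateTime

t0 = UTCDateTime(2019, 12, 9)
tr = get_data('WIZ', '10', 'HHZ', t0, t0 + 3600)
if tr is not None:
    print(tr.stats.coordinates.latitude)  # filled from the inventory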
Example #17
 def test_writing_text_and_binary_textual_file_headers(self):
     """
     Make sure the textual file header can be written if has been passed
     either as text or as a bytestring.
     """
     # Loop over bytes/text and the textual header encoding.
     for textual_file_header in [b"12345", "12345"]:
         for encoding in ["ASCII", "EBCDIC"]:
             st = read()
             for tr in st:
                 tr.data = np.require(tr.data, dtype=np.float32)
             st.stats = AttribDict()
             st.stats.textual_file_header = textual_file_header
             with io.BytesIO() as buf:
                 # Warning raised to create a complete header.
                 with pytest.warns(UserWarning):
                     st.write(buf,
                              format="SEGY",
                              data_encoding=5,
                              textual_header_encoding=encoding)
                 buf.seek(0, 0)
                 # Read with SEG-Y to preserve the textual file header.
                 st2 = _read_segy(buf)
             self.assertEqual(
                 # Ignore the auto-generated parts of the header.
                 st2.stats.textual_file_header.decode().split()[0],
                 "12345")
Example #18
 def test_nestedStats(self):
     """
     Various setter and getter tests.
     """
     #1
     stats = Stats()
     stats.test = dict()
     stats.test['test2'] = 'muh'
     self.assertEqual(stats.test.test2, 'muh')
     self.assertEqual(stats.test['test2'], 'muh')
     self.assertEqual(stats['test'].test2, 'muh')
     self.assertEqual(stats['test']['test2'], 'muh')
     stats.test['test2'] = 'maeh'
     self.assertEqual(stats.test.test2, 'maeh')
     self.assertEqual(stats.test['test2'], 'maeh')
     self.assertEqual(stats['test'].test2, 'maeh')
     self.assertEqual(stats['test']['test2'], 'maeh')
     #2 - multiple initialization
     stats = Stats({'muh': 'meah'})
     stats2 = Stats(Stats(Stats(stats)))
     self.assertEqual(stats2.muh, 'meah')
     #3 - check conversion to AttribDict
     stats = Stats()
     stats.sub1 = {'muh': 'meah'}
     stats.sub2 = AttribDict({'muh2': 'meah2'})
     stats2 = Stats(stats)
     self.assertTrue(isinstance(stats.sub1, AttribDict))
     self.assertTrue(isinstance(stats.sub2, AttribDict))
     self.assertEqual(stats2.sub1.muh, 'meah')
     self.assertEqual(stats2.sub2.muh2, 'meah2')
Example #19
 def test_get_stations_inconsistency(self):
     """
     """
     # initialize client
     client = Client(user='******')
     # example 1
     start = UTCDateTime(2008, 1, 1)
     end = start + 1
     result_origin = AttribDict({
         'remark': '',
         'code': 'RWMO',
         'elevation': 763.0,
         'description': 'Wildenmoos, Bavaria, BW-Net',
         'start': UTCDateTime(2006, 7, 4, 0, 0),
         'restricted': False,
         'archive_net': '',
         'longitude': 12.729887,
         'affiliation': 'BayernNetz',
         'depth': None,
         'place': 'Wildenmoos',
         'country': ' BW-Net',
         'latitude': 47.744171,
         'end': None
     })
     # OK: from origin node
     result = client.get_stations(start, end, 'BW', route=True)
     self.assertTrue(result_origin in result)
     # BUT: this one from a different node was modified and fails
     result = client.get_stations(start, end, 'BW')
     self.assertTrue(result_origin in result)
Example #20
 def create_obs_network(self):
     obs_stations = self.read_stations()
     for errmsg, logtype in sorted(list(self.unique_errors)):
         if logtype == 'error':
             LOGGER.error(errmsg)
         else:
             LOGGER.warning(errmsg)
     if obs_stations:
         obs_network = inventory.Network(self.experiment_t[0]['net_code_s'])
         obs_network.description = self.experiment_t[0]['longname_s']
         start_time, end_time = self.get_network_date()
         obs_network.start_date = UTCDateTime(start_time)
         obs_network.end_date = UTCDateTime(end_time)
         obs_network.total_number_of_stations = self.total_number_stations
         extra = AttribDict({
             'PH5ReportNum': {
                 'value': self.experiment_t[0]['experiment_id_s'],
                 'namespace': self.manager.iris_custom_ns,
                 'type': 'attribute'
             }
         })
         obs_network.extra = extra
         obs_network.stations = obs_stations
         return obs_network
     else:
         return
Example #21
    def create_obs_station(self, station_list, sta_code, array_name,
                           start_date, end_date, sta_longitude, sta_latitude,
                           sta_elevation, deployment):

        obs_station = obspy.core.inventory.Station(sta_code,
                                                   latitude=sta_latitude,
                                                   longitude=sta_longitude,
                                                   start_date=start_date,
                                                   end_date=end_date,
                                                   elevation=sta_elevation)

        obs_station.creation_date = UTCDateTime(
            station_list[deployment][0]['deploy_time/epoch_l'])
        obs_station.termination_date = UTCDateTime(
            station_list[deployment][0]['pickup_time/epoch_l'])

        extra = AttribDict({
            'PH5Array': {
                'value': str(array_name)[8:],
                'namespace': self.manager.iris_custom_ns,
                'type': 'attribute'
            }
        })
        obs_station.extra = extra
        obs_station.site = obspy.core.inventory.Site(
            name=station_list[deployment][0]['location/description_s'])
        return obs_station
Example #22
 def test_get_stations(self):
     """
     """
     # initialize client
     client = Client(user='******')
     # example 1
     start = UTCDateTime(2008, 1, 1)
     end = start + 1
     result = client.get_stations(start, end, 'BW')
     self.assertTrue(
         AttribDict({
             'remark': '',
             'code': 'RWMO',
             'elevation': 763.0,
             'description': 'Wildenmoos, Bavaria, BW-Net',
             'start': UTCDateTime(2006, 7, 4, 0, 0),
             'restricted': False,
             'archive_net': '',
             'longitude': 12.729887,
             'affiliation': 'BayernNetz',
             'depth': None,
             'place': 'Wildenmoos',
             'country': ' BW-Net',
             'latitude': 47.744171,
             'end': None
         }) in result)
Example #23
 def test_deepcopy(self):
     """
     Tests deepcopy method of Stats object.
     """
     stats = Stats()
     stats.network = 'BW'
     stats['station'] = 'ROTZ'
     stats['other1'] = {'test1': '1'}
     stats['other2'] = AttribDict({'test2': '2'})
     stats['other3'] = 'test3'
     stats2 = copy.deepcopy(stats)
     stats.network = 'CZ'
     stats.station = 'RJOB'
     self.assertEqual(stats2.__class__, Stats)
     self.assertEqual(stats2.network, 'BW')
     self.assertEqual(stats2.station, 'ROTZ')
     self.assertEqual(stats2.other1.test1, '1')
     self.assertEqual(stats2.other1.__class__, AttribDict)
     self.assertEqual(len(stats2.other1), 1)
     self.assertEqual(stats2.other2.test2, '2')
     self.assertEqual(stats2.other2.__class__, AttribDict)
     self.assertEqual(len(stats2.other2), 1)
     self.assertEqual(stats2.other3, 'test3')
     self.assertEqual(stats.network, 'CZ')
     self.assertEqual(stats.station, 'RJOB')
Example #24
def add_gpscorrection_into_stationxml(csv_file, input_xml, out_xml=None):
    """
    Read in the correction CSV data from a file, get the station metadata node from input_xml file,
    then add the CSV data into the station xml node to write into out_xml

    :param csv_file: input csv file with correction data
    :param input_xml: input original stationXML file which contains the metadata for the network and station of csv_file
    :param out_xml:  Directory of the output xml file
    :return: full path of the output xml file
    """

    ns = "https://github.com/GeoscienceAustralia/hiperseis/xmlns/1.0"

    (net, sta, csv_data) = get_csv_correction_data(csv_file)

    # path2_myxml = "/home/feizhang/Githubz/hiperseis/tests/testdata/7D_2012_2013.xml"
    my_inv = read_inventory(input_xml, format='STATIONXML')

    # https://docs.obspy.org/packages/autogen/obspy.core.inventory.inventory.Inventory.select.html#obspy.core.inventory.inventory.Inventory.select

    selected_inv = my_inv.select(network=net, station=sta)

    # print(selected_inv)

    my_tag = AttribDict()
    my_tag.namespace = ns
    my_tag.value = csv_data

    selected_inv.networks[0].stations[0].extra = AttribDict()
    selected_inv.networks[0].stations[0].extra.gpsclockcorrection = my_tag

    stationxml_with_csv = '%s.%s_station_inv_modified.xml' % (net, sta)

    if out_xml is not None and os.path.isdir(out_xml):
        stationxml_with_csv = os.path.join(out_xml, stationxml_with_csv)

    selected_inv.write(
        stationxml_with_csv,
        format='STATIONXML',
        nsmap={
            'GeoscienceAustralia':
            'https://github.com/GeoscienceAustralia/hiperseis/xmlns/1.0'
        })

    # my_inv.write('modified_inventory.xml', format='STATIONXML')

    return stationxml_with_csv
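
A hypothetical invocation (file names are made up; get_csv_correction_data must be importable from the surrounding module):

out = add_gpscorrection_into_stationxml('7D.DE43_clock_correction.csv',
                                        '7D_2012_2013.xml', out_xml='/tmp')
print(out)  # e.g. /tmp/7D.DE43_station_inv_modified.xml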
Example #25
def _parse_hypo71_hypocenter(hypo_file):
    with open(hypo_file) as fp:
        line = fp.readline()
        # Skip the first line if it contains
        # characters in the first 10 digits:
        if any(c.isalpha() for c in line[0:10]):
            line = fp.readline()
    hypo = AttribDict()
    timestr = line[0:17]
    # There are two possible formats for the timestring.
    # We try both of them
    try:
        dt = datetime.strptime(timestr, '%y%m%d %H %M%S.%f')
    except Exception:
        dt = datetime.strptime(timestr, '%y%m%d %H%M %S.%f')
    hypo.origin_time = UTCDateTime(dt)
    lat = float(line[17:20])
    lat_deg = float(line[21:26])
    hypo.latitude = lat + lat_deg/60
    lon = float(line[26:30])
    lon_deg = float(line[31:36])
    hypo.longitude = lon + lon_deg/60
    hypo.depth = float(line[36:42])
    evid = os.path.basename(hypo_file)
    evid = evid.replace('.phs', '').replace('.h', '').replace('.hyp', '')
    hypo.evid = evid
    return hypo
Example #26
 def test_pop(self):
     """
     Tests pop method of AttribDict class.
     """
     ad = AttribDict()
     ad.test = 1
     ad['test2'] = 'test'
     # removing via pop
     temp = ad.pop('test')
     self.assertEqual(temp, 1)
     self.assertFalse('test' in ad)
     self.assertTrue('test2' in ad)
     self.assertFalse('test' in ad.__dict__)
     self.assertTrue('test2' in ad.__dict__)
     self.assertFalse(hasattr(ad, 'test'))
     self.assertTrue(hasattr(ad, 'test2'))
     # using pop() for not existing element raises a KeyError
     self.assertRaises(KeyError, ad.pop, 'test')
Example #27
 def offsets(self):
     """
     Returns a dictionary with element code and x, y offsets from
     array center.
     """
     offsets = {}
     for ele in self.elements:
         offsets[ele.code] = {'x': ele.x, 'y': ele.y}
     return AttribDict(offsets)
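
Because the return value is an AttribDict (and nested plain dicts are converted to AttribDicts on assignment), offsets can be read with either key- or attribute-style access; a stand-alone sketch with a made-up element code:

from obspy.core.util import AttribDict

offs = AttribDict({'STA1': {'x': 10.0, 'y': -5.0}})
assert offs['STA1']['x'] == offs.STA1.x == 10.0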
Example #28
File: io.py Project: preinh/RF
def read_stations(fname):
    """
    Read station positions from whitespace delimited file
    
    Example file:
    # station  lat  lon  elev
    STN  10.0  -50.0  160
    """
    ret = AttribDict()
    with open(fname) as f:
        for line in f:
            if not line.startswith('#'):
                vals = line.split()
                ret[vals[0]] = AttribDict()
                ret[vals[0]].latitude = float(vals[1])
                ret[vals[0]].longitude = float(vals[2])
                ret[vals[0]].elevation = float(vals[3])
    return ret
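
A usage sketch that round-trips the docstring's example file through a temporary file:

import os
import tempfile

content = '# station  lat  lon  elev\nSTN  10.0  -50.0  160\n'
with tempfile.NamedTemporaryFile('w', delete=False) as f:
    f.write(content)
stations = read_stations(f.name)
os.remove(f.name)
print(stations.STN.latitude)  # 10.0, attribute-style access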
Example #29
 def test_clear(self):
     """
     Tests clear method of AttribDict class.
     """
     ad = AttribDict()
     ad.test = 1
     ad['test2'] = 'test'
     # removing via clear
     ad.clear()
     self.assertFalse('test' in ad)
     self.assertFalse('test2' in ad)
     self.assertFalse('test' in ad.__dict__)
     self.assertFalse('test2' in ad.__dict__)
     self.assertFalse(hasattr(ad, 'test'))
     self.assertFalse(hasattr(ad, 'test2'))
     # class attributes should be still present
     self.assertTrue(hasattr(ad, 'readonly'))
     self.assertTrue(hasattr(ad, 'priorized_keys'))
Example #30
def _add_hypocenter(trace, hypo):
    if hypo is None:
        # Try to get hypocenter information from the SAC header
        try:
            evla = trace.stats.sac.evla
            evlo = trace.stats.sac.evlo
            evdp = trace.stats.sac.evdp
            begin = trace.stats.sac.b
        except AttributeError:
            return

        try:
            tori = trace.stats.sac.o
            origin_time = trace.stats.starttime + tori - begin
        except AttributeError:
            origin_time = None

        if origin_time is not None:
            # make a copy of origin_time and round it to the nearest second
            _second = origin_time.second
            if origin_time.microsecond >= 500000:
                _second += 1
            _microsecond = 0
            _evid_time = origin_time.replace(
                second=_second, microsecond=_microsecond)
        else:
            # make a copy of starttime and round it to the nearest minute
            _starttime = trace.stats.starttime
            _minute = _starttime.minute
            if _starttime.second >= 30:
                _minute += 1
            _second = 0
            _microsecond = 0
            _evid_time = _starttime.replace(
                minute=_minute, second=_second, microsecond=_microsecond)

        hypo = AttribDict()
        hypo.origin_time = origin_time
        try:
            kevnm = trace.stats.sac.kevnm
            # if string is empty, raise Exception
            if not kevnm:
                raise Exception
            # if string has spaces, then kevnm is not a code,
            # so raise Exception
            if ' ' in kevnm:
                raise Exception
            hypo.evid = kevnm
        except Exception:
            hypo.evid = _evid_time.strftime('%Y%m%d_%H%M%S')
        hypo.latitude = evla
        hypo.longitude = evlo
        hypo.depth = evdp
    trace.stats.hypo = hypo
Example #31
    def FK(self, st, inv, stime, etime, fmin, fmax, slim, sres, win_len,
           win_frac):

        n = len(st)
        for i in range(n):
            coords = inv.get_coordinates(st[i].id)
            st[i].stats.coordinates = AttribDict({
                'latitude':
                coords['latitude'],
                'elevation':
                coords['elevation'],
                'longitude':
                coords['longitude']
            })

        kwargs = dict(
            # slowness grid: X min, X max, Y min, Y max, Slow Step
            sll_x=-1 * slim,
            slm_x=slim,
            sll_y=-1 * slim,
            slm_y=slim,
            sl_s=sres,
            # sliding window properties
            win_len=win_len,
            win_frac=win_frac,
            # frequency properties
            frqlow=fmin,
            frqhigh=fmax,
            prewhiten=0,
            # restrict output
            semb_thres=-1e9,
            vel_thres=-1e9,
            timestamp='mlabday',
            stime=stime + 0.1,
            etime=etime - 0.1)

        try:
            out = array_processing(st, **kwargs)

            T = out[:, 0]
            relpower = out[:, 1]
            abspower = out[:, 2]
            AZ = out[:, 3]
            AZ[AZ < 0.0] += 360
            Slowness = out[:, 4]

        except Exception:
            print("Check Parameters and Starttime/Endtime")

            relpower = []
            abspower = []
            AZ = []
            Slowness = []
            T = []

        return relpower, abspower, AZ, Slowness, T
Example #32
def _parse_hypocenter_from_event(ev):
    hypo = AttribDict()
    hypo.latitude = ev.latitude
    hypo.longitude = ev.longitude
    hypo.depth = ev.depth
    hypo.origin_time = ev.utcdate
    hypo.evid = ev.event_id
    return hypo
Example #33
 def state(self):
     """Return test state."""
     out = AttribDict()
     out.path = os.path.join(os.path.dirname(__file__), 'data')
     out.path_images = os.path.join(os.path.dirname(__file__), 'images')
     out.a = np.sin(np.linspace(0, 10, 101))
     out.b = 5 * np.roll(out.a, 5)
     out.c = 5 * np.roll(out.a[:81], 5)
     return out
Example #34
    def __init__(self, m=None, **kwargs):
        self._set_m(m)
        # Optional keywords default to None
        inverted = AttribDict()

        keys = ["depth", "ts", "weights", "station_VR", "total_VR", "dd", "ss"]
        # Update given key by their given value
        for key, value in kwargs.items():
            if key in keys:
                inverted[key] = value
        self.inverted = inverted
Example #35
 def test_delete(self):
     """
     Tests delete method of AttribDict class.
     """
     ad = AttribDict()
     ad.test = 1
     ad['test2'] = 'test'
     # deleting test using dictionary
     del ad['test']
     self.assertFalse('test' in ad)
     self.assertTrue('test2' in ad)
     self.assertFalse('test' in ad.__dict__)
     self.assertTrue('test2' in ad.__dict__)
     self.assertFalse(hasattr(ad, 'test'))
     self.assertTrue(hasattr(ad, 'test2'))
     # deleting test2 using attribute
     del ad.test2
     self.assertFalse('test2' in ad)
     self.assertFalse('test2' in ad.__dict__)
     self.assertFalse(hasattr(ad, 'test2'))
Example #36
    def test_write_with_extra_tags_and_read(self):
        """
        Tests that a QuakeML file with additional custom "extra" tags gets
        written correctly and that when reading it again the extra tags are
        parsed correctly.
        """
        filename = os.path.join(self.path, "quakeml_1.2_origin.xml")

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            cat = readQuakeML(filename)
            self.assertEqual(len(w), 0)

        # add some custom tags to first event:
        #  - tag with explicit namespace but no explicit ns abbreviation
        #  - tag without explicit namespace (gets obspy default ns)
        #  - tag with explicit namespace and namespace abbreviation
        my_extra = AttribDict(
            {'public': {'value': False,
                        'namespace': r"http://some-page.de/xmlns/1.0",
                        'attrib': {u"some_attrib": u"some_value",
                                   u"another_attrib": u"another_value"}},
             'custom': {'value': u"True",
                        'namespace': r'http://test.org/xmlns/0.1'},
             'new_tag': {'value': 1234,
                         'namespace': r"http://test.org/xmlns/0.1"},
             'tX': {'value': UTCDateTime('2013-01-02T13:12:14.600000Z'),
                    'namespace': r'http://test.org/xmlns/0.1'},
             'dataid': {'namespace': r'http://anss.org/xmlns/catalog/0.1',
                        'type': 'attribute', 'value': '00999999'}})
        nsmap = {"ns0": r"http://test.org/xmlns/0.1",
                 "catalog": r'http://anss.org/xmlns/catalog/0.1'}
        cat[0].extra = my_extra.copy()
        # insert a pick with an extra field
        p = Pick()
        p.extra = {'weight': {'value': 2,
                              'namespace': r"http://test.org/xmlns/0.1"}}
        cat[0].picks.append(p)

        with NamedTemporaryFile() as tf:
            tmpfile = tf.name
            # write file
            cat.write(tmpfile, format="QUAKEML", nsmap=nsmap)
            # check contents
            with open(tmpfile, "r") as fh:
                content = fh.read()
            # check namespace definitions in root element
            expected = ['<q:quakeml',
                        'xmlns:catalog="http://anss.org/xmlns/catalog/0.1"',
                        'xmlns:ns0="http://test.org/xmlns/0.1"',
                        'xmlns:ns1="http://some-page.de/xmlns/1.0"',
                        'xmlns:q="http://quakeml.org/xmlns/quakeml/1.2"',
                        'xmlns="http://quakeml.org/xmlns/bed/1.2"']
            for line in expected:
                self.assertTrue(line in content)
            # check additional tags
            expected = [
                '<ns0:custom>True</ns0:custom>',
                '<ns0:new_tag>1234</ns0:new_tag>',
                '<ns0:tX>2013-01-02T13:12:14.600000Z</ns0:tX>',
                '<ns1:public '
                'another_attrib="another_value" '
                'some_attrib="some_value">false</ns1:public>'
            ]
            for line in expected:
                self.assertTrue(line in content)
            # now, read again to test if it's parsed correctly..
            cat = readQuakeML(tmpfile)
        # when reading..
        #  - namespace abbreviations should be disregarded
        #  - we always end up with a namespace definition, even if it was
        #    omitted when originally setting the custom tag
        #  - custom namespace abbreviations should be attached to the Catalog
        self.assertTrue(hasattr(cat[0], "extra"))

        def _tostr(x):
            if isinstance(x, bool):
                if x:
                    return str("true")
                else:
                    return str("false")
            return str(x)

        for key, value in my_extra.items():
            my_extra[key]['value'] = _tostr(value['value'])
        self.assertEqual(cat[0].extra, my_extra)
        self.assertTrue(hasattr(cat[0].picks[0], "extra"))
        self.assertEqual(
            cat[0].picks[0].extra,
            {'weight': {'value': '2',
                        'namespace': r'http://test.org/xmlns/0.1'}})
        self.assertTrue(hasattr(cat, "nsmap"))
        self.assertTrue(getattr(cat, "nsmap")['ns0'] == nsmap['ns0'])
Example #37
 def test_write_with_extra_tags_without_read_extra(self):
     """
     Tests that a Inventory object that was instantiated with
     custom namespace tags and attributes is written correctly.
     """
     # read the default inventory
     inv = obspy.read_inventory()
     # manually add extra to the dictionary
     network = inv[0]
     network.extra = {}
     ns = 'http://test.myns.ns/'
     # manually add a new custom namespace tag and attribute to the
     # inventory
     network.extra['mynsNetworkTag'] = AttribDict({
                                         'value': 'mynsNetworkTagValue',
                                         'namespace': ns})
     network.extra['mynsNetworkAttrib'] = AttribDict({
                                         'value': 'mynsNetworkAttribValue',
                                         'namespace': ns,
                                         'type': 'attribute'})
     station = inv[0][0]
     station.extra = {}
     station.extra['mynsStationTag'] = AttribDict({
                                         'value': 'mynsStationTagValue',
                                         'namespace': ns})
     station.extra['mynsStationAttrib'] = AttribDict({
                                         'value': 'mynsStationAttribValue',
                                         'namespace': ns,
                                         'type': 'attribute'})
     channel = inv[0][0][0]
     # add data availability to inventory
     channel.data_availability = AttribDict({
                 'start': obspy.UTCDateTime('1998-10-26T20:35:58+00:00'),
                 'end': obspy.UTCDateTime('2014-07-21T12:00:00+00:00')})
     channel.extra = {}
     channel.extra['mynsChannelTag'] = AttribDict({
                                     'value': 'mynsChannelTagValue',
                                     'namespace': ns})
     channel.extra['mynsChannelAttrib'] = AttribDict({
                                         'value': 'mynsChannelAttribValue',
                                         'namespace': ns,
                                         'type': 'attribute'})
     # add nested tags
     nested_tag = AttribDict()
     nested_tag.namespace = ns
     nested_tag.value = AttribDict()
     # add two nested tags
     nested_tag.value.my_nested_tag1 = AttribDict()
     nested_tag.value.my_nested_tag1.namespace = ns
     nested_tag.value.my_nested_tag1.value = 1.23E+10
     nested_tag.value.my_nested_tag2 = AttribDict()
     nested_tag.value.my_nested_tag2.namespace = ns
     nested_tag.value.my_nested_tag2.value = True
     nested_tag.value.my_nested_tag2.attrib = {'{%s}%s' % (
         ns, 'nestedAttribute1'): 'nestedAttributeValue1'}
     channel.extra['nested'] = nested_tag
     with NamedTemporaryFile() as tf:
         # manually add custom namespace definition
         tmpfile = tf.name
         # set namespace map to include only valid custom namespaces
         mynsmap = {'myns': ns}
         # write file with manually defined namespace map
         inv.write(tmpfile, format="STATIONXML", nsmap=mynsmap)
         # check contents
         with open(tmpfile, "rb") as fh:
             # enforce reproducible attribute orders through write_c14n
             obj = etree.fromstring(fh.read()).getroottree()
             buf = io.BytesIO()
             obj.write_c14n(buf)
             buf.seek(0, 0)
             content = buf.read()
         # check namespace definitions in root element
         expected = [
             b'xmlns="http://www.fdsn.org/xml/station/1"',
             b'xmlns:myns="http://test.myns.ns/"',
             b'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"']
         for line in expected:
             self.assertIn(line, content)
         # check additional tags
         expected = [
             b'<myns:mynsNetworkTag>' +
             b'mynsNetworkTagValue' +
             b'</myns:mynsNetworkTag>',
             b'myns:mynsNetworkAttrib="mynsNetworkAttribValue"',
             b'<myns:mynsStationTag>' +
             b'mynsStationTagValue' +
             b'</myns:mynsStationTag>',
             b'myns:mynsStationAttrib="mynsStationAttribValue"',
             b'<myns:mynsChannelTag>' +
             b'mynsChannelTagValue' +
             b'</myns:mynsChannelTag>',
             b'myns:mynsChannelAttrib="mynsChannelAttribValue"',
             b'<myns:nested>',
             b'<myns:my_nested_tag1>' +
             b'12300000000.0' +
             b'</myns:my_nested_tag1>',
             b'<myns:my_nested_tag2 ' +
             b'myns:nestedAttribute1="nestedAttributeValue1">' +
             b'True' +
             b'</myns:my_nested_tag2>',
             b'</myns:nested>'
         ]
         for line in expected:
             self.assertIn(line, content)
Example #38
    def test_write_with_extra_tags_and_read(self):
        """
        Tests that a QuakeML file with additional custom "extra" tags gets
        written correctly and that when reading it again the extra tags are
        parsed correctly.
        """
        filename = os.path.join(self.path, "quakeml_1.2_origin.xml")

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            cat = _read_quakeml(filename)
            self.assertEqual(len(w), 0)

        # add some custom tags to first event:
        #  - tag with explicit namespace but no explicit ns abbreviation
        #  - tag without explicit namespace (gets obspy default ns)
        #  - tag with explicit namespace and namespace abbreviation
        my_extra = AttribDict(
            {
                "public": {
                    "value": False,
                    "namespace": "http://some-page.de/xmlns/1.0",
                    "attrib": {"some_attrib": "some_value", "another_attrib": "another_value"},
                },
                "custom": {"value": "True", "namespace": "http://test.org/xmlns/0.1"},
                "new_tag": {"value": 1234, "namespace": "http://test.org/xmlns/0.1"},
                "tX": {"value": UTCDateTime("2013-01-02T13:12:14.600000Z"), "namespace": "http://test.org/xmlns/0.1"},
                "dataid": {"namespace": "http://anss.org/xmlns/catalog/0.1", "type": "attribute", "value": "00999999"},
                # some nested tags :
                "quantity": {
                    "namespace": "http://some-page.de/xmlns/1.0",
                    "attrib": {"attrib1": "attrib_value1", "attrib2": "attrib_value2"},
                    "value": {
                        "my_nested_tag1": {"namespace": "http://some-page.de/xmlns/1.0", "value": 1.23e10},
                        "my_nested_tag2": {"namespace": "http://some-page.de/xmlns/1.0", "value": False},
                    },
                },
            }
        )
        nsmap = {"ns0": "http://test.org/xmlns/0.1", "catalog": "http://anss.org/xmlns/catalog/0.1"}
        cat[0].extra = my_extra.copy()
        # insert a pick with an extra field
        p = Pick()
        p.extra = {"weight": {"value": 2, "namespace": "http://test.org/xmlns/0.1"}}
        cat[0].picks.append(p)

        with NamedTemporaryFile() as tf:
            tmpfile = tf.name
            # write file
            cat.write(tmpfile, format="QUAKEML", nsmap=nsmap)
            # check contents
            with open(tmpfile, "rb") as fh:
                # enforce reproducible attribute orders through write_c14n
                obj = etree.fromstring(fh.read()).getroottree()
                buf = io.BytesIO()
                obj.write_c14n(buf)
                buf.seek(0, 0)
                content = buf.read()
            # check namespace definitions in root element
            expected = [
                b"<q:quakeml",
                b'xmlns:catalog="http://anss.org/xmlns/catalog/0.1"',
                b'xmlns:ns0="http://test.org/xmlns/0.1"',
                b'xmlns:ns1="http://some-page.de/xmlns/1.0"',
                b'xmlns:q="http://quakeml.org/xmlns/quakeml/1.2"',
                b'xmlns="http://quakeml.org/xmlns/bed/1.2"',
            ]
            for line in expected:
                self.assertIn(line, content)
            # check additional tags
            expected = [
                b"<ns0:custom>True</ns0:custom>",
                b"<ns0:new_tag>1234</ns0:new_tag>",
                b"<ns0:tX>2013-01-02T13:12:14.600000Z</ns0:tX>",
                b"<ns1:public " b'another_attrib="another_value" ' b'some_attrib="some_value">false</ns1:public>',
            ]
            for line in expected:
                self.assertIn(line, content)
            # now, read again to test if it's parsed correctly..
            cat = _read_quakeml(tmpfile)
        # when reading..
        #  - namespace abbreviations should be disregarded
        #  - we always end up with a namespace definition, even if it was
        #    omitted when originally setting the custom tag
        #  - custom namespace abbreviations should be attached to the Catalog
        self.assertTrue(hasattr(cat[0], "extra"))

        def _tostr(x):
            if isinstance(x, bool):
                if x:
                    return str("true")
                else:
                    return str("false")
            elif isinstance(x, AttribDict):
                for key, value in x.items():
                    x[key].value = _tostr(value["value"])
                return x
            else:
                return str(x)

        for key, value in my_extra.items():
            my_extra[key]["value"] = _tostr(value["value"])
        self.assertEqual(cat[0].extra, my_extra)
        self.assertTrue(hasattr(cat[0].picks[0], "extra"))
        self.assertEqual(cat[0].picks[0].extra, {"weight": {"value": "2", "namespace": "http://test.org/xmlns/0.1"}})
        self.assertTrue(hasattr(cat, "nsmap"))
        self.assertEqual(getattr(cat, "nsmap")["ns0"], nsmap["ns0"])
Example #39
def _read_y(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a Nanometrics Y file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: Nanometrics Y file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/YAYT_BHZ_20021223.124800")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    .AYT..BHZ | 2002-12-23T12:48:00.000100Z - ... | 100.0 Hz, 18000 samples
    """
    # The first tag in a Y-file must be the TAG_Y_FILE (0) tag. This must be
    # followed by the following tags, in any order:
    #   TAG_STATION_INFO (1)
    #   TAG_STATION_LOCATION (2)
    #   TAG_STATION_PARAMETERS (3)
    #   TAG_STATION_DATABASE (4)
    #   TAG_SERIES_INFO (5)
    #   TAG_SERIES_DATABASE (6)
    # The following tag is optional:
    #   TAG_STATION_RESPONSE (26)
    # The last tag in the file must be a TAG_DATA_INT32 (7) tag. This tag must
    # be followed by an array of LONG's. The number of entries in the array
    # must agree with what was described in the TAG_SERIES_INFO data.
    with open(filename, "rb") as fh:
        trace = Trace()
        trace.stats.y = AttribDict()
        count = -1
        while True:
            endian, tag_type, next_tag, _next_same = __parse_tag(fh)
            if tag_type == 1:
                # TAG_STATION_INFO
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes.  It should always be set to zeroes.
                # UCHAR Station[5] (BLANKPAD)
                #   Station is the five letter SEED format station
                #   identification.
                # UCHAR Location[2] (BLANKPAD)
                #   Location Location is the two letter SEED format location
                #   identification.
                # UCHAR Channel[3] (BLANKPAD)
                #   Channel Channel is the three letter SEED format channel
                #   identification.
                # UCHAR NetworkID[51] (ASCIIZ)
                #   This is some descriptive text identifying the network.
                # UCHAR SiteName[61] (ASCIIZ)
                #   SiteName is some text identifying the site.
                # UCHAR Comment[31] (ASCIIZ)
                #   Comment is any comment for this station.
                # UCHAR SensorType[51] (ASCIIZ)
                #   SensorType is some text describing the type of sensor used
                #   at the station.
                # UCHAR DataFormat[7] (ASCIIZ)
                #   DataFormat is some text describing the data format recorded
                #   at the station.
                data = fh.read(next_tag)
                parts = _unpack_with_asciiz_and_decode(b"5s2s3s51z61z31z51z7z", data[8:])
                trace.stats.station = parts[0]
                trace.stats.location = parts[1]
                trace.stats.channel = parts[2]
                # extra
                params = AttribDict()
                params.network_id = parts[3]
                params.side_name = parts[4]
                params.comment = parts[5]
                params.sensor_type = parts[6]
                params.data_format = parts[7]
                trace.stats.y.tag_station_info = params
            elif tag_type == 2:
                # TAG_STATION_LOCATION
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes.  It should always be set to zeroes.
                # FLOAT Latitude
                #   Latitude in degrees of the location of the station. The
                #   latitude should be between -90 (South) and +90 (North).
                # FLOAT Longitude
                #   Longitude in degrees of the location of the station. The
                #   longitude should be between -180 (West) and +180 (East).
                # FLOAT Elevation
                #   Elevation in meters above sea level of the station.
                # FLOAT Depth
                #   Depth is the depth in meters of the sensor.
                # FLOAT Azimuth
                #   Azimuth of the sensor in degrees clockwise.
                # FLOAT Dip
                #   Dip is the dip of the sensor. 90 degrees is defined as
                #   vertical right way up.
                data = fh.read(next_tag)
                parts = _unpack_with_asciiz_and_decode(endian + b"ffffff", data[8:])
                params = AttribDict()
                params.latitude = parts[0]
                params.longitude = parts[1]
                params.elevation = parts[2]
                params.depth = parts[3]
                params.azimuth = parts[4]
                params.dip = parts[5]
                trace.stats.y.tag_station_location = params
            elif tag_type == 3:
                # TAG_STATION_PARAMETERS
                # UCHAR Update[16]
                #   This field is only used internally for administrative
                #   purposes.  It should always be set to zeroes.
                # REALTIME StartValidTime
                #   Time that the information in these records became valid.
                # REALTIME EndValidTime
                #   Time that the information in these records became invalid.
                # FLOAT Sensitivity
                #   Sensitivity of the sensor in nanometers per bit.
                # FLOAT SensFreq
                #   Frequency at which the sensitivity was measured.
                # FLOAT SampleRate
                #   This is the number of samples per second. This value can
                #   be less than 1.0 (e.g., 0.1).
                # FLOAT MaxClkDrift
                #   Maximum drift rate of the clock in seconds per sample.
                # UCHAR SensUnits[24] (ASCIIZ)
                #   Some text indicating the units in which the sensitivity was
                #   measured.
                # UCHAR CalibUnits[24] (ASCIIZ)
                #   Some text indicating the units in which calibration input
                #   was measured.
                # UCHAR ChanFlags[27] (BLANKPAD)
                #   Text indicating the channel flags according to the SEED
                #   definition.
                # UCHAR UpdateFlag
                #   This flag must be “N” or “U” according to the SEED
                #   definition.
                # UCHAR Filler[4]
                #   Filler pads out the record to satisfy the alignment
                #   restrictions for reading data on a SPARC processor.
                data = fh.read(next_tag)
                parts = _unpack_with_asciiz_and_decode(
                    endian + b"ddffff24z24z27sc4s", data[16:])
                trace.stats.sampling_rate = parts[4]
                # extra
                params = AttribDict()
                params.start_valid_time = parts[0]
                params.end_valid_time = parts[1]
                params.sensitivity = parts[2]
                params.sens_freq = parts[3]
                params.sample_rate = parts[4]
                params.max_clk_drift = parts[5]
                params.sens_units = parts[6]
                params.calib_units = parts[7]
                params.chan_flags = parts[8]
                params.update_flag = parts[9]
                trace.stats.y.tag_station_parameters = params
            elif tag_type == 4:
                # TAG_STATION_DATABASE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes.  It should always be set to zeroes.
                # REALTIME LoadDate
                #   Date the information was loaded into the database.
                # UCHAR Key[16]
                #   Unique key that identifies this record in the database.
                data = fh.read(next_tag)
                parts = _unpack_with_asciiz_and_decode(endian + b"d16s", data[8:])
                params = AttribDict()
                params.load_date = parts[0]
                params.key = parts[1]
                trace.stats.y.tag_station_database = params
            elif tag_type == 5:
                # TAG_SERIES_INFO
                # UCHAR Update[16]
                #   This field is only used internally for administrative
                #   purposes.  It should always be set to zeroes.
                # REALTIME StartTime
                #   This is the start time of the data in this series.
                # REALTIME EndTime
                #   This is the end time of the data in this series.
                # ULONG NumSamples
                #   This is the number of samples of data in this series.
                # LONG DCOffset
                #   DCOffset is the DC offset of the data.
                # LONG MaxAmplitude
                #   MaxAmplitude is the maximum amplitude of the data.
                # LONG MinAmplitude
                #   MinAmplitude is the minimum amplitude of the data.
                # UCHAR Format[8] (ASCIIZ)
                #   This is the format of the data. This should always be
                #   “YFILE”.
                # UCHAR FormatVersion[8] (ASCIIZ)
                #   FormatVersion is the version of the format of the data.
                #   This should always be “5.0”
                data = fh.read(next_tag)
                parts = _unpack_with_asciiz_and_decode(
                    endian + b"ddLlll8z8z", data[16:])
                trace.stats.starttime = UTCDateTime(parts[0])
                count = parts[2]
                # extra
                params = AttribDict()
                params.endtime = UTCDateTime(parts[1])
                params.num_samples = parts[2]
                params.dc_offset = parts[3]
                params.max_amplitude = parts[4]
                params.min_amplitude = parts[5]
                params.format = parts[6]
                params.format_version = parts[7]
                trace.stats.y.tag_series_info = params
            elif tag_type == 6:
                # TAG_SERIES_DATABASE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes.  It should always be set to zeroes.
                # REALTIME LoadDate
                #   Date the information was loaded into the database.
                # UCHAR Key[16]
                #   Unique key that identifies this record in the database.
                data = fh.read(next_tag)
                parts = _unpack_with_asciiz_and_decode(endian + b"d16s", data[8:])
                params = AttribDict()
                params.load_date = parts[0]
                params.key = parts[1]
                trace.stats.y.tag_series_database = params
            elif tag_type == 26:
                # TAG_STATION_RESPONSE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes.  It should always be set to zeroes.
                # UCHAR PathName[260]
                #   PathName is the full name of the file which contains the
                #   response information for this station.
                data = fh.read(next_tag)
                parts = _unpack_with_asciiz_and_decode(b"260s", data[8:])
                params = AttribDict()
                params.path_name = parts[0]
                trace.stats.y.tag_station_response = params
            elif tag_type == 7:
                # TAG_DATA_INT32
                trace.data = from_buffer(
                    fh.read(np.dtype(np.int32).itemsize * count),
                    dtype=np.int32)
                # break loop as TAG_DATA_INT32 should be the last tag in file
                break
            else:
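                # Unknown tag: skip its payload and continue scanning for
                # the next tag header.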
                fh.seek(next_tag, 1)
    return Stream([trace])
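
The reader above walks the file as a sequence of tagged records, filling
trace.stats (with format-specific headers collected under trace.stats.y) and
stopping once the int32 data block has been read. A minimal usage sketch,
assuming this function is registered as ObsPy's Y-format reader plugin; the
file name is a hypothetical placeholder:

from obspy import read

# "example.y" is a placeholder path to a Nanometrics Y file.
st = read("example.y", format="Y")
tr = st[0]
print(tr.stats.station, tr.stats.sampling_rate)
# Extra headers gathered by the reader shown above:
print(tr.stats.y.tag_series_info.num_samples)
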
Beispiel #40
def rfstats(stats=None, event=None, station=None, stream=None,
            phase='P', dist_range=None, tt_model='iasp91',
            pp_depth=None, pp_phase=None, model='iasp91'):
    """
    Calculate ray specific values like slowness for given event and station.

    :param stats: stats object with event and/or station attributes. Can be
        None if both event and station are given.
    :param event: ObsPy :class:`~obspy.core.event.Event` object
    :param station: station object with attributes latitude, longitude and
        elevation
    :param stream: If a stream is given, stats has to be None. In this case
        rfstats will be called for every stats object in the stream.
    :param phase: string with phase. Usually this will be 'P' or
        'S' for P and S receiver functions, respectively.
    :type dist_range: tuple of length 2
    :param dist_range: if the epicentral distance of the event is not in
        this interval, None is returned by this function,\n
        if phase == 'P' defaults to (30, 90),\n
        if phase == 'S' defaults to (50, 85)
    :param tt_model: model for travel time calculation.
        (see the :mod:`obspy.taup` module, default: iasp91)
    :param pp_depth: Depth for piercing point calculation
        (in km, default: None -> No calculation)
    :param pp_phase: Phase for pp calculation (default: 'S' for P-receiver
        function and 'P' for S-receiver function)
    :param model: Path to model file for pp calculation
        (see :class:`~rf.simple_model.SimpleModel`, default: iasp91)
    :return: ``stats`` object with event and station attributes, distance,
        back_azimuth, inclination, onset and slowness or None if epicentral
        distance is not in the given interval
    """
    if stream is not None:
        assert stats is None
        kwargs = {'event': event, 'station': station, 'stream': None,
                  'phase': phase, 'dist_range': dist_range,
                  'tt_model': tt_model, 'pp_depth': pp_depth,
                  'pp_phase': pp_phase, 'model': model}
        for tr in stream:
            rfstats(stats=tr.stats, **kwargs)
        return
    phase = phase.upper()
    if dist_range is None and phase in 'PS':
        dist_range = (30, 90) if phase == 'P' else (50, 85)
    if stats is None:
        stats = AttribDict({})
    if event is not None and station is not None:
        stats.update(obj2stats(event=event, station=station))
    dist, baz, _ = gps2DistAzimuth(stats.station_latitude,
                                   stats.station_longitude,
                                   stats.event_latitude,
                                   stats.event_longitude)
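    # gps2DistAzimuth returns the distance in meters; convert to kilometers
    # and then to degrees of arc.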
    dist = kilometer2degrees(dist / 1000)
    if dist_range and not dist_range[0] <= dist <= dist_range[1]:
        return
    tt_model = TauPyModel(model=tt_model)
    arrivals = tt_model.get_travel_times(stats.event_depth, dist, (phase,))
    if len(arrivals) == 0:
        raise Exception('TauPy does not return phase %s at distance %s' %
                        (phase, dist))
    if len(arrivals) > 1:
        from warnings import warn
        msg = ('TauPy returns more than one arrival for phase %s at '
               'distance %s -> take first arrival')
        warn(msg % (phase, dist))
    arrival = arrivals[0]
    onset = stats.event_time + arrival.time
    inc = arrival.incident_angle
    slowness = arrival.ray_param_sec_degree
    stats.update({'distance': dist, 'back_azimuth': baz, 'inclination': inc,
                  'onset': onset, 'slowness': slowness})
    if pp_depth is not None:
        model = load_model(model)
        if pp_phase is None:
            pp_phase = 'S' if phase.upper().endswith('P') else 'P'
        model.ppoint(stats, pp_depth, phase=pp_phase)
    return stats
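
When a stream is passed, rfstats calls itself once per trace and annotates
each tr.stats in place, so the event and station attributes must already be
present on the traces. A hedged usage sketch; the stream st and its
event_*/station_* stats attributes are assumed to exist (e.g. set up via a
prior obj2stats call):

# Annotate all traces for P receiver functions; traces whose epicentral
# distance falls outside the default range (30, 90) are left unannotated.
rfstats(stream=st, phase='P')
for tr in st:
    print(tr.stats.get('slowness'), tr.stats.get('onset'))
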
Beispiel #41
    def test_write_with_extra_tags_and_read(self):
        """
        Tests that a QuakeML file with additional custom "extra" tags gets
        written correctly and that when reading it again the extra tags are
        parsed correctly.
        """
        filename = os.path.join(self.path, "quakeml_1.2_origin.xml")

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            cat = _read_quakeml(filename)
            self.assertEqual(len(w), 0)

        # add some custom tags to first event:
        #  - tag with explicit namespace but no explicit ns abbreviation
        #  - tag without explicit namespace (gets obspy default ns)
        #  - tag with explicit namespace and namespace abbreviation
        my_extra = AttribDict(
            {'public': {'value': False,
                        'namespace': 'http://some-page.de/xmlns/1.0',
                        'attrib': {'some_attrib': 'some_value',
                                   'another_attrib': 'another_value'}},
             'custom': {'value': 'True',
                        'namespace': 'http://test.org/xmlns/0.1'},
             'new_tag': {'value': 1234,
                         'namespace': 'http://test.org/xmlns/0.1'},
             'tX': {'value': UTCDateTime('2013-01-02T13:12:14.600000Z'),
                    'namespace': 'http://test.org/xmlns/0.1'},
             'dataid': {'namespace': 'http://anss.org/xmlns/catalog/0.1',
                        'type': 'attribute', 'value': '00999999'},
             # some nested tags:
             'quantity': {'namespace': 'http://some-page.de/xmlns/1.0',
                          'attrib': {'attrib1': 'attrib_value1',
                                     'attrib2': 'attrib_value2'},
                          'value': {
                              'my_nested_tag1': {
                                  'namespace': 'http://some-page.de/xmlns/1.0',
                                  'value': 1.23E10},
                              'my_nested_tag2': {
                                  'namespace': 'http://some-page.de/xmlns/1.0',
                                  'value': False}}}})
        nsmap = {'ns0': 'http://test.org/xmlns/0.1',
                 'catalog': 'http://anss.org/xmlns/catalog/0.1'}
        cat[0].extra = my_extra.copy()
        # insert a pick with an extra field
        p = Pick()
        p.extra = {'weight': {'value': 2,
                              'namespace': 'http://test.org/xmlns/0.1'}}
        cat[0].picks.append(p)

        with NamedTemporaryFile() as tf:
            tmpfile = tf.name
            # write file
            cat.write(tmpfile, format='QUAKEML', nsmap=nsmap)
            # check contents
            with open(tmpfile, 'rb') as fh:
                # enforce reproducible attribute orders through write_c14n
                obj = etree.fromstring(fh.read()).getroottree()
                buf = io.BytesIO()
                obj.write_c14n(buf)
                buf.seek(0, 0)
                content = buf.read()
            # check namespace definitions in root element
            expected = [b'<q:quakeml',
                        b'xmlns:catalog="http://anss.org/xmlns/catalog/0.1"',
                        b'xmlns:ns0="http://test.org/xmlns/0.1"',
                        b'xmlns:ns1="http://some-page.de/xmlns/1.0"',
                        b'xmlns:q="http://quakeml.org/xmlns/quakeml/1.2"',
                        b'xmlns="http://quakeml.org/xmlns/bed/1.2"']
            for line in expected:
                self.assertIn(line, content)
            # check additional tags
            expected = [
                b'<ns0:custom>True</ns0:custom>',
                b'<ns0:new_tag>1234</ns0:new_tag>',
                b'<ns0:tX>2013-01-02T13:12:14.600000Z</ns0:tX>',
                b'<ns1:public '
                b'another_attrib="another_value" '
                b'some_attrib="some_value">false</ns1:public>'
            ]
            for line in expected:
                self.assertIn(line, content)
            # now, read again to test if it's parsed correctly..
            cat = _read_quakeml(tmpfile)
        # when reading..
        #  - namespace abbreviations should be disregarded
        #  - we always end up with a namespace definition, even if it was
        #    omitted when originally setting the custom tag
        #  - custom namespace abbreviations should be attached to Catalog
        self.assertTrue(hasattr(cat[0], 'extra'))

        def _tostr(x):
            if isinstance(x, bool):
                if x:
                    return str('true')
                else:
                    return str('false')
            elif isinstance(x, AttribDict):
                for key, value in x.items():
                    x[key].value = _tostr(value['value'])
                return x
            else:
                return str(x)

        for key, value in my_extra.items():
            my_extra[key]['value'] = _tostr(value['value'])
        self.assertEqual(cat[0].extra, my_extra)
        self.assertTrue(hasattr(cat[0].picks[0], 'extra'))
        self.assertEqual(
            cat[0].picks[0].extra,
            {'weight': {'value': '2',
                        'namespace': 'http://test.org/xmlns/0.1'}})
        self.assertTrue(hasattr(cat, 'nsmap'))
        self.assertEqual(getattr(cat, 'nsmap')['ns0'], nsmap['ns0'])
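
Stripped to its essentials, the "extra" mechanism exercised above attaches a
mapping whose leaves carry a value, a namespace, and optionally a type
('attribute' instead of a child element) and an attrib dict. A stand-alone
sketch; the namespace URL and output file name are made up for illustration:

from obspy.core.event import Catalog, Event
from obspy.core.util import AttribDict

event = Event()
event.extra = AttribDict({
    'quality': {'value': 'good',            # serialized as element text
                'namespace': 'http://example.org/xmlns/1.0'},
    'dataid': {'value': '00999999',         # serialized as an XML attribute
               'namespace': 'http://example.org/xmlns/1.0',
               'type': 'attribute'}})
cat = Catalog(events=[event])
cat.write('extra_tags.xml', format='QUAKEML',
          nsmap={'ex': 'http://example.org/xmlns/1.0'})
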
Beispiel #42
    def test_setdefault(self):
        """
        Tests setdefault method of AttribDict class.
        """
        ad = AttribDict()
        # 1
        default = ad.setdefault('test', 'NEW')
        self.assertEqual(default, 'NEW')
        self.assertEqual(ad['test'], 'NEW')
        self.assertEqual(ad.test, 'NEW')
        self.assertEqual(ad.get('test'), 'NEW')
        self.assertEqual(ad.__getattr__('test'), 'NEW')
        self.assertEqual(ad.__getitem__('test'), 'NEW')
        self.assertEqual(ad.__dict__['test'], 'NEW')
        self.assertEqual(ad.__dict__.get('test'), 'NEW')
        self.assertTrue('test' in ad)
        self.assertTrue('test' in ad.__dict__)
        # 2 - existing key should not be overwritten
        default = ad.setdefault('test', 'SOMETHINGDIFFERENT')
        self.assertEqual(default, 'NEW')
        self.assertEqual(ad['test'], 'NEW')
        self.assertEqual(ad.test, 'NEW')
        self.assertEqual(ad.get('test'), 'NEW')
        self.assertEqual(ad.__getattr__('test'), 'NEW')
        self.assertEqual(ad.__getitem__('test'), 'NEW')
        self.assertEqual(ad.__dict__['test'], 'NEW')
        self.assertEqual(ad.__dict__.get('test'), 'NEW')
        self.assertTrue('test' in ad)
        self.assertTrue('test' in ad.__dict__)
        # 3 - default value is None
        ad = AttribDict()
        default = ad.setdefault('test')
        self.assertEqual(default, None)
        self.assertEqual(ad['test'], None)
        self.assertEqual(ad.test, None)
        self.assertEqual(ad.get('test'), None)
        self.assertEqual(ad.__getattr__('test'), None)
        self.assertEqual(ad.__getitem__('test'), None)
        self.assertEqual(ad.__dict__['test'], None)
        self.assertEqual(ad.__dict__.get('test'), None)
        self.assertTrue('test' in ad)
        self.assertTrue('test' in ad.__dict__)
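
The assertions above all hinge on AttribDict keeping item access and
attribute access in sync, which also holds for keys created through
setdefault. A minimal demonstration:

from obspy.core.util import AttribDict

ad = AttribDict()
ad.setdefault('gain', 1.0)  # inserts only if 'gain' is missing
ad.gain = 2.0               # attribute write is visible via item access
assert ad['gain'] == 2.0
assert ad.setdefault('gain', 99.0) == 2.0  # existing key is kept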