Exemple #1
0
def find_coord(path_to_xml):
    """Return ``(station_code, latitude, longitude)`` for a StationXML file.

    If the file is missing locally, try to download it from FDSN first.
    On any failure the sentinel ``('000', 0, 0)`` is returned instead of
    raising, preserving the original best-effort contract.

    :param path_to_xml: Path like ``.../NET.STA.xml``.
    """
    # File names look like NET.STA.xml -> derive network and station codes.
    net = path_to_xml.split('/')[-1].split('.')[0]
    sta = path_to_xml.split('/')[-1].split('.')[1]

    if not os.path.exists(path_to_xml):
        try:
            warn('stationxml file not found, trying to download from FDSN...')
            get_staxml(net, sta)
            inv = read_inventory(path_to_xml)
        except Exception:
            # Was a bare "except:"; narrowed to Exception so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            warn('Could not download stationxml file: '
                 'Could not retrieve coordinates.')
            return '000', 0, 0
    else:
        try:
            inv = read_inventory(path_to_xml)
        except KeyError:
            warn('Faulty stationxml file: Could not retrieve coordinates.')
            return '000', 0, 0

    # BUG FIX: the original only returned coordinates on the download path;
    # when the file already existed it printed the inventory (Py2 print
    # statement) and fell off the end, returning None.
    station = inv[0][0]
    return str(station.code), float(station.latitude), float(station.longitude)
Exemple #2
0
def get_geoinf(id1, id2):
    """Look up coordinates of two channel ids and the distance between them.

    :param id1: First SEED id (``NET.STA.LOC.CHA``).
    :param id2: Second SEED id.
    :returns: ``(lat1, lon1, lat2, lon2, dist, az, baz)`` — *dist* in meters,
        azimuth/backazimuth in degrees (as returned by gps2dist_azimuth).
    """
    # StationXML files are named NET.STA.xml under meta/stationxml/.
    inv1 = '{}.{}.xml'.format(*id1.split('.')[0:2])
    inv2 = '{}.{}.xml'.format(*id2.split('.')[0:2])

    inv1 = read_inventory(os.path.join('meta', 'stationxml', inv1))
    inv2 = read_inventory(os.path.join('meta', 'stationxml', inv2))

    # Replace 'radial' and 'transverse' component codes by 'N' and 'E' so
    # the coordinate lookup matches real channel codes.  Raw strings fix
    # the invalid escape sequence '\.' (DeprecationWarning on Python 3.6+,
    # SyntaxWarning/error on newer versions).
    id1 = re.sub(r'\.??R$', 'N', id1)
    id2 = re.sub(r'\.??R$', 'N', id2)
    id1 = re.sub(r'\.??T$', 'E', id1)
    id2 = re.sub(r'\.??T$', 'E', id2)

    c1 = inv1.get_coordinates(id1)
    c2 = inv2.get_coordinates(id2)

    lat1, lon1 = c1['latitude'], c1['longitude']
    lat2, lon2 = c2['latitude'], c2['longitude']

    dist, az, baz = gps2dist_azimuth(lat1, lon1, lat2, lon2)

    return lat1, lon1, lat2, lon2, dist, az, baz
Exemple #3
0
def test_dot_accessors(example_data_set):
    """
    Tests the dot accessors for waveforms and stations.
    """
    data_path = os.path.join(data_dir, "small_sample_data_set")
    data_set = ASDFDataSet(example_data_set.filename)

    # Listing the contents also asserts that tab completion works.
    assert sorted(dir(data_set.waveforms)) == ["AE_113A", "TA_POKR"]
    for station in ("AE_113A", "TA_POKR"):
        accessor = getattr(data_set.waveforms, station)
        assert sorted(dir(accessor)) == ["StationXML", "raw_recording"]

    def _strip(stream, extra_key):
        # Drop bookkeeping stats entries so streams compare equal.
        for tr in stream:
            delattr(tr.stats, extra_key)
            del tr.stats._format
        return stream

    # Actually check the waveform contents against the raw files on disk.
    for station, pattern in (("AE_113A", "AE.*.mseed"),
                             ("TA_POKR", "TA.*.mseed")):
        got = _strip(
            getattr(data_set.waveforms, station).raw_recording.sort(), "asdf")
        expected = _strip(
            obspy.read(os.path.join(data_path, pattern)).sort(), "mseed")
        assert got == expected

    assert data_set.waveforms.AE_113A.StationXML == obspy.read_inventory(
        os.path.join(data_path, "AE.113A..BH*.xml"))
    assert data_set.waveforms.TA_POKR.StationXML == obspy.read_inventory(
        os.path.join(data_path, "TA.POKR..BH*.xml"))
Exemple #4
0
 def test_warning_when_blockette_57_is_not_followed_by_58(self):
     """
     A decimation stage (blockette 57) without the matching gain
     blockette 58 must raise when invalid responses are not skipped,
     and only warn when they are skipped.
     """
     filename = os.path.join(self.data_path, "RESP.decimation_without_gain")
     # Fail if responses are explicitly not skipped.
     with warnings.catch_warnings(record=True):
         with self.assertRaises(InvalidResponseError) as e:
             obspy.read_inventory(filename, skip_invalid_responses=False)
     # The captured exception object remains usable after the context exits.
     self.assertEqual(
         e.exception.args[0],
         "Stage 1: A decimation stage with blockette 57 must be followed "
         "by a blockette 58 which is missing here.")
     # Otherwise continue, but raise a warning.
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         obspy.read_inventory(filename)
     # This triggers a number of warnings as the file is constructed and
     # misses all kinds of information.
     self.assertGreaterEqual(len(w), 1)
     msg = ("Failed to calculate response for XX.ABC..BHX with epoch "
            "1999-12-16T02:14:00.000000Z - 1999-12-21T19:10:59.000000Z "
            "because: Stage 1: A decimation stage with blockette 57 must "
            "be followed by a blockette 58 which is missing here.")
     # for/else: fail only if no recorded warning carries the expected text.
     for _w in w:
         if _w.message.args[0] == msg:
             break
     else:
         raise AssertionError("Could not find warning to test for.")
def test_merge_inventories():
    """
    Silly test, merging the same inventory twice should result in the same
    as the test_merging_stations() test.
    """
    path = os.path.join(data_dir, "big_station.xml")
    inv = obspy.read_inventory(path, format="stationxml")
    inv_2 = obspy.read_inventory(path, format="stationxml")
    backup = copy.deepcopy(inv)

    # Pre-conditions on the fixture file.
    assert len(inv.networks) == 2
    assert len(inv.select(network="BW")[0].stations) == 3

    merged = merge_inventories(inv, inv_2, network_id="BW", station_id="RJOB")

    # The input inventory must not be modified in place.
    assert inv == backup

    # Exactly one network/station pair survives the merge.
    assert len(merged.networks) == 1
    assert merged[0].code == "BW"
    assert len(merged[0].stations) == 1
    assert merged[0][0].code == "RJOB"

    # Make sure the station dates have been set correctly.
    assert merged[0][0].start_date == \
        obspy.UTCDateTime("2001-05-15T00:00:00.000000Z")
    assert merged[0][0].end_date is None

    # The 9 channels should remain.
    assert len(merged[0][0].channels) == 9
Exemple #6
0
    def test_read_invalid_filename(self):
        """
        Tests that we get a sane error message when calling read_inventory()
        with a filename that doesn't exist
        """
        # Build a path guaranteed not to exist by doubling the candidate
        # name until the filesystem lookup fails (gives up after 10 tries).
        doesnt_exist = 'dsfhjkfs'
        for i in range(10):
            if os.path.exists(doesnt_exist):
                doesnt_exist += doesnt_exist
                continue
            break
        else:
            self.fail('unable to get invalid file path')
        doesnt_exist = native_str(doesnt_exist)

        # Python 2 raises IOError; Python 3 the more specific subclass.
        if PY2:
            exception_type = getattr(builtins, 'IOError')
        else:
            exception_type = getattr(builtins, 'FileNotFoundError')
        exception_msg = "[Errno 2] No such file or directory: '{}'"

        formats = _get_entry_points(
            'obspy.plugin.inventory', 'readFormat').keys()
        # try read_inventory() with invalid filename for all registered read
        # plugins and also for filetype autodiscovery
        formats = [None] + list(formats)
        # NOTE(review): the [:1] slice only exercises format=None
        # (autodiscovery), contradicting the comment above — confirm
        # whether restricting the loop to the first entry is intentional.
        for format in formats[:1]:
            with self.assertRaises(exception_type) as e:
                read_inventory(doesnt_exist, format=format)
            self.assertEqual(
                str(e.exception), exception_msg.format(doesnt_exist))
Exemple #7
0
    def get_response(self, datalogger_keys, sensor_keys):
        """
        Get Response from NRL tree structure

        :param datalogger_keys: List of data-loggers.
        :type datalogger_keys: list[str]
        :param sensor_keys: List of sensors.
        :type sensor_keys: list[str]
        :rtype: :class:`~obspy.core.inventory.response.Response`

        >>> nrl = NRL()
        >>> response = nrl.get_response(
        ...     sensor_keys=['Nanometrics', 'Trillium Compact', '120 s'],
        ...     datalogger_keys=['REF TEK', 'RT 130 & 130-SMA', '1', '200'])
        >>> print(response)   # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
        Channel Response
          From M/S (Velocity in Meters per Second) to COUNTS (Digital Counts)
          Overall Sensitivity: 4.74576e+08 defined at 1.000 Hz
          10 stages:
            Stage 1: PolesZerosResponseStage from M/S to V, gain: 754.3
            Stage 2: ResponseStage from V to V, gain: 1
            Stage 3: Coefficients... from V to COUNTS, gain: 629129
            Stage 4: Coefficients... from COUNTS to COUNTS, gain: 1
            Stage 5: Coefficients... from COUNTS to COUNTS, gain: 1
            Stage 6: Coefficients... from COUNTS to COUNTS, gain: 1
            Stage 7: Coefficients... from COUNTS to COUNTS, gain: 1
            Stage 8: Coefficients... from COUNTS to COUNTS, gain: 1
            Stage 9: Coefficients... from COUNTS to COUNTS, gain: 1
            Stage 10: Coefficients... from COUNTS to COUNTS, gain: 1
        """
        def _parse_resp(resp_text):
            # Parse a RESP string into its single contained Response object.
            with io.BytesIO(resp_text.encode()) as buf:
                buf.seek(0, 0)
                inv = obspy.read_inventory(buf, format="RESP")
            # By construction the RESP document holds exactly one channel.
            return inv[0][0][0].response

        combined = _parse_resp(self.get_datalogger_resp(datalogger_keys))
        sensor_resp = _parse_resp(self.get_sensor_resp(sensor_keys))

        # Combine both: stage one of the data logger is swapped for stage
        # one of the sensor.
        combined.response_stages[0] = sensor_resp.response_stages[0]
        try:
            combined.recalculate_overall_sensitivity()
        except ValueError:
            warnings.warn("Failed to recalculate overall sensitivity.")

        return combined
Exemple #8
0
    def test_station_bulk(self):
        """
        Test bulk station requests, POSTing data to server. Also tests
        authenticated bulk request.

        Does currently only test reading from a list of list. The other
        input types are tested with the waveform bulk downloader and thus
        should work just fine.
        """
        # Run everything once anonymously and once authenticated.
        clients = [self.client, self.client_auth]
        # test cases for providing lists of lists
        starttime = UTCDateTime(1990, 1, 1)
        endtime = UTCDateTime(1990, 1, 1) + 10
        bulk = [
            ["IU", "ANMO", "", "BHE", starttime, endtime],
            ["IU", "CCM", "", "BHZ", starttime, endtime],
            ["IU", "COR", "", "UHZ", starttime, endtime],
            ["IU", "HRV", "", "LHN", starttime, endtime],
        ]
        for client in clients:
            # Test with station level.
            inv = client.get_stations_bulk(bulk, level="station")
            # Test with output to file.
            with NamedTemporaryFile() as tf:
                client.get_stations_bulk(bulk, filename=tf.name, level="station")
                inv2 = read_inventory(tf.name, format="stationxml")

            # In-memory and file-roundtripped results must agree.
            self.assertEqual(inv.networks, inv2.networks)
            self.assertEqual(len(inv.networks), 1)
            self.assertEqual(inv[0].code, "IU")
            self.assertEqual(len(inv.networks[0].stations), 4)
            self.assertEqual(
                sorted([_i.code for _i in inv.networks[0].stations]), sorted(["ANMO", "CCM", "COR", "HRV"])
            )

            # Test with channel level.
            inv = client.get_stations_bulk(bulk, level="channel")
            # Test with output to file.
            with NamedTemporaryFile() as tf:
                client.get_stations_bulk(bulk, filename=tf.name, level="channel")
                inv2 = read_inventory(tf.name, format="stationxml")

            self.assertEqual(inv.networks, inv2.networks)
            self.assertEqual(len(inv.networks), 1)
            self.assertEqual(inv[0].code, "IU")
            self.assertEqual(len(inv.networks[0].stations), 4)
            self.assertEqual(
                sorted([_i.code for _i in inv.networks[0].stations]), sorted(["ANMO", "CCM", "COR", "HRV"])
            )
            # Channel level must return exactly the four requested channels.
            channels = []
            for station in inv[0]:
                for channel in station:
                    channels.append("IU.%s.%s.%s" % (station.code, channel.location_code, channel.code))
            self.assertEqual(sorted(channels), sorted(["IU.ANMO..BHE", "IU.CCM..BHZ", "IU.COR..UHZ", "IU.HRV..LHN"]))
        return
Exemple #9
0
    def test_restrictions(self):
        """
        Tests if the waveform restrictions actually work as expected.
        """
        # No restrictions currently apply - we should get something.
        response = self.client.get('/fdsnws/station/1/query')
        self.assertEqual(response.status_code, 200)
        self.assertTrue('OK' in response.reason_phrase)
        inv = obspy.read_inventory(io.BytesIO(response.getvalue()))
        self.assertEqual(inv.get_contents()["stations"],
                         ["BW.ALTM (Beilngries, Bavaria, BW-Net)"])

        # First add a restriction that does nothing.
        # (AA.BBBB matches no station in the test data set.)
        r = Restriction.objects.get_or_create(network="AA", station="BBBB")[0]
        r.users.add(User.objects.filter(username='******')[0])
        r.save()
        # Everything should still work.
        response = self.client.get('/fdsnws/station/1/query')
        self.assertEqual(response.status_code, 200)
        self.assertTrue('OK' in response.reason_phrase)
        inv = obspy.read_inventory(io.BytesIO(response.getvalue()))
        self.assertEqual(inv.get_contents()["stations"],
                         ["BW.ALTM (Beilngries, Bavaria, BW-Net)"])

        # Now add restrictions that does something.
        r = Restriction.objects.get_or_create(network="BW", station="ALTM")[0]
        r.users.add(User.objects.filter(username='******')[0])
        r.save()

        # Now the same query should no longer return something as the
        # station has been restricted.  HTTP 204 == "no content".
        response = self.client.get('/fdsnws/station/1/query')
        self.assertEqual(response.status_code, 204)

        # The correct user can still get the stations.
        response = self.client.get('/fdsnws/station/1/queryauth',
                                   **self.valid_auth_headers)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('OK' in response.reason_phrase)
        inv = obspy.read_inventory(io.BytesIO(response.getvalue()))
        self.assertEqual(inv.get_contents()["stations"],
                         ["BW.ALTM (Beilngries, Bavaria, BW-Net)"])

        # Make another user that has not been added to this restriction - he
        # should not be able to retrieve it.
        self.client.logout()
        User.objects.get_or_create(
            username='******', password=make_password('some_dude'))[0]
        credentials = base64.b64encode(b'some_dude:some_dude')
        auth_headers = {
            'HTTP_AUTHORIZATION': 'Basic ' + credentials.decode("ISO-8859-1")
        }
        response = self.client.get('/fdsnws/station/1/queryauth',
                                   **auth_headers)
        self.assertEqual(response.status_code, 204)
Exemple #10
0
 def test_warning_with_no_blockettes_58(self):
     """
     Reading a RESP file with repeated but identical blockettes 58 for
     stage 0 emits a warning and uses only one of them.
     """
     filename = os.path.join(self.data_path, "RESP.repeated_stage_0")
     expected = (
         "Epoch BN.WR0..SHZ "
         "[1996-03-01T00:00:00.000000Z - 1999-01-03T00:00:00.000000Z]: "
         "Channel has multiple (but identical) blockettes 58 for stage 0. "
         "Only one will be used.")
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         obspy.read_inventory(filename)
     self.assertGreaterEqual(len(w), 1)
     # The very first warning must be the duplicate-blockette message.
     self.assertEqual(w[0].message.args[0], expected)
Exemple #11
0
 def test_warning_with_multiple_differing_blockettes_58_in_stage_0(self):
     """
     Multiple *different* blockettes 58 for stage 0 trigger a warning and
     the last one wins.
     """
     filename = os.path.join(self.data_path,
                             "RESP.repeated_differing_stage_0")
     expected = (
         "Epoch BN.WR0..SHZ "
         "[1996-03-01T00:00:00.000000Z - 1999-01-03T00:00:00.000000Z]: "
         "Channel has multiple different blockettes 58 for stage 0. The "
         "last one will be chosen - this is a faulty file - try to fix "
         "it!")
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         obspy.read_inventory(filename)
     self.assertGreaterEqual(len(w), 1)
     # The very first warning must be the differing-blockette message.
     self.assertEqual(w[0].message.args[0], expected)
Exemple #12
0
    def add_inv(self, input, unit):
        """
        Attach response metadata to this object as ``self.inv``.

        :param input: ``'staxml'`` to read one StationXML inventory shared
            by all ids, or ``'resp'`` to record per-channel RESP file paths.
        :param unit: Output units, stored per channel in the RESP case.
        :raises ValueError: If *input* is neither ``'resp'`` nor ``'staxml'``.
        """
        if input == 'staxml':
            # All ids share a single NET.STA StationXML file.
            net_sta = self.ids[0].split('.')[0:2]
            path = os.path.join('meta', 'stationxml',
                                '{}.{}.xml'.format(*net_sta))
            self.inv = read_inventory(path)
        elif input == 'resp':
            # One RESP.NET.STA.LOC.CHA entry per channel id.
            self.inv = {}
            for channel_id in self.ids:
                resp_name = 'RESP.{}.{}.{}.{}'.format(*channel_id.split('.'))
                self.inv[channel_id] = {
                    'filename': os.path.join('meta', 'resp', resp_name),
                    'units': unit,
                }
        else:
            raise ValueError('input must be \'resp\' or \'staxml\'')
Exemple #13
0
def prepare_real_adj_data():
    """Build a rotated copy of the sample stream plus adjoint-source
    metadata for four channels (three components + a duplicated vertical
    under location code "00")."""
    st = SAMPLE_STREAM.copy()
    inv = obspy.read_inventory()
    event = obspy.read_events()[0]
    origin = event.preferred_origin()

    rotate_stream(st, origin.latitude, origin.longitude, inv)

    # Duplicate the vertical trace under location code "00".
    extra_z = st.select(component="Z")[0].copy()
    extra_z.stats.location = "00"
    st.append(extra_z)

    def _entry(misfit):
        # All channels share the same period band; only the misfit varies.
        return {"adj_src_type": "waveform_misfit", "misfit": misfit,
                "min_period": 17.0, "max_period": 40.0}

    meta = {
        "BW.RJOB..EHZ": _entry(1.0),
        "BW.RJOB..EHR": _entry(2.0),
        "BW.RJOB..EHT": _entry(3.0),
        "BW.RJOB.00.EHZ": _entry(4.0),
    }
    return st, meta
Exemple #14
0
def test_window_on_stream_user_levels():
    """
    window_on_stream() with per-component user modules yields the expected
    number of windows per channel.
    """
    obs_tr = read(obsfile)
    syn_tr = read(synfile)

    # One shared config for all three components.  (The original code
    # loaded the same YAML file a second time into an unused variable;
    # that redundant dead load has been removed.)
    config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml")
    config = wio.load_window_config_yaml(config_file)
    config_dict = {"Z": config, "R": config, "T": config}

    cat = readEvents(quakeml)
    inv = read_inventory(staxml)

    # The same user module adjusts acceptance levels for every channel.
    _mod = "pytomo3d.window.tests.user_module_example"
    user_modules = {"BHZ": _mod, "BHR": _mod, "BHT": _mod}

    windows = win.window_on_stream(obs_tr, syn_tr, config_dict, station=inv,
                                   event=cat, user_modules=user_modules,
                                   _verbose=False,
                                   figure_mode=False)

    assert len(windows) == 3
    nwins = {_w: len(windows[_w]) for _w in windows}
    assert nwins == {"IU.KBL..BHR": 5, "IU.KBL..BHZ": 2, "IU.KBL..BHT": 4}
Exemple #15
0
def _generator(events, inventory, rf=False):
    """Generator yielding length at first and then station/event information

    The first value yielded is the total number of items that will follow
    (useful for sizing progress bars); afterwards one stats dict (plus the
    event and coordinates when ``rf=True``) is yielded per station/event
    combination.
    """
    inventory = read_inventory(inventory)
    channels = inventory.get_contents()['channels']
    # Station ids are the channel ids minus the trailing component letter.
    stations = list(set(ch.rsplit('.', 1)[0] for ch in channels))
    # Keep one representative channel per station for coordinate lookups.
    one_channel = {ch.rsplit('.', 1)[0]: ch for ch in channels}
    if events is not None:
        events = readEvents(events)
        # First yield: the total number of station/event combinations.
        yield len(stations) * len(events)
        for event in events:
            for station in stations:
                # Replace the component letter with '?' to match all three.
                seed_id = one_channel[station][:-1] + '?'
                net, sta, loc, cha = seed_id.split('.')
                stats = {'network': net, 'station': sta, 'location': loc,
                         'channel': cha}
                if rf:
                    # Receiver-function mode: caller needs event + coords.
                    stats['event'] = event
                    stats['seed_id'] = seed_id
                    coords = inventory.get_coordinates(one_channel[station])
                    yield stats, event, coords
                else:
                    stats['event_time'] = event.preferred_origin()['time']
                    yield stats
    else:
        # No events: yield only station-level stats with a dummy time.
        yield len(stations)
        for station in stations:
            net, sta, loc, cha = one_channel[station].split('.')
            stats = {'network': net, 'station': sta, 'location': loc,
                     'channel': cha[:-1] + '?',
                     'event_time': _DummyUTC()}
            yield stats
Exemple #16
0
 def test_len(self):
     """
     Tests the __len__ property.
     """
     network = read_inventory()[0]
     # __len__ mirrors the number of stations (2 in the default inventory).
     self.assertEqual(len(network), len(network.stations))
     self.assertEqual(len(network), 2)
def test_merging_stations():
    """
    Tests reading a StationXML file with a couple of networks and duplicate
    stations and merging it.
    """
    path = os.path.join(data_dir, "big_station.xml")
    inv = obspy.read_inventory(path, format="stationxml")
    backup = copy.deepcopy(inv)

    # Pre-conditions on the fixture file.
    assert len(inv.networks) == 2
    assert len(inv.select(network="BW")[0].stations) == 3

    isolated = isolate_and_merge_station(inv, network_id="BW",
                                         station_id="RJOB")

    # The input inventory must not be modified in place.
    assert inv == backup

    # Only BW.RJOB survives.
    assert len(isolated.networks) == 1
    assert isolated[0].code == "BW"
    assert len(isolated[0].stations) == 1
    assert isolated[0][0].code == "RJOB"

    # Make sure the station dates have been set correctly.
    assert isolated[0][0].start_date == \
        obspy.UTCDateTime("2001-05-15T00:00:00.000000Z")
    assert isolated[0][0].end_date is None

    # The 9 channels should remain.
    assert len(isolated[0][0].channels) == 9
Exemple #18
0
def test_process_synt():
    """End-to-end processing of a synthetic stream compared against a
    pre-computed benchmark file."""
    inv = obspy.read_inventory(
        os.path.join(DATA_DIR, "stationxml", "IU.KBL.syn.xml"))

    st = testsyn.copy()
    event = obspy.readEvents(testquakeml)[0]
    origin = event.preferred_origin() or event.origins[0]

    # Band-pass corner frequencies in Hz (periods 90 s down to 22.5 s).
    pre_filt = [1 / 90., 1 / 60., 1 / 27.0, 1 / 22.5]
    t_start = origin.time
    t_end = origin.time + 6000.0
    st_new = proc.process_stream(
        st, remove_response_flag=False, inventory=inv,
        filter_flag=True, pre_filt=pre_filt,
        starttime=t_start, endtime=t_end, resample_flag=True,
        sampling_rate=2.0, taper_type="hann",
        taper_percentage=0.05, rotate_flag=True,
        event_latitude=origin.latitude,
        event_longitude=origin.longitude)

    st_compare = obspy.read(
        os.path.join(DATA_DIR, "proc", "IU.KBL.syn.proc.mseed"))
    assert compare_stream_kernel(st_new, st_compare)
Exemple #19
0
    def test_reconstructing_stage_0_from_other_blockettes(self):
        """
        A RESP file without stage 0 (overall sensitivity) should have it
        reconstructed from the blockettes 58 of the individual stages,
        without emitting any warnings.
        """
        # This file has no stage 0 but a bunch of other blockettes 58 from
        # other stages. Try to reconstruct stage 0.
        filename = os.path.join(self.data_path, "RESP.JM.NMIA0.00.HHN")

        frequencies = np.logspace(-3, 3, 100)
        t = obspy.UTCDateTime(2015, 1, 1)

        # Should raise no warnings.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            inv = obspy.read_inventory(filename)
        self.assertEqual(len(w), 0)

        # NOTE(review): channel code inside the file is HNN although the
        # file name says HHN — presumably intentional test data; confirm.
        self.assertEqual(inv.get_contents()["channels"], ["JM.NMIA0.00.HNN"])

        # Also check the responses via the inventory objects and by directly
        # calling evalresp.
        for unit in ("DISP", "VEL", "ACC"):
            e_r = evalresp_for_frequencies(
                t_samp=None, frequencies=frequencies, filename=filename,
                date=t, units=unit)
            i_r = inv[0][0][0].response.get_evalresp_response_for_frequencies(
                frequencies=frequencies, output=unit)
            np.testing.assert_equal(e_r, i_r)
Exemple #20
0
    def test_network_select_with_empty_stations(self):
        """
        Tests the behaviour of the Network.select() method for empty stations.
        """
        net = read_inventory()[0]

        # Strip all channels so only bare stations remain.
        for sta in net:
            sta.channels = []

        # 2 stations and 0 channels remain.
        self.assertEqual(len(net), 2)
        self.assertEqual(sum(len(sta) for sta in net), 0)

        # No arguments: everything should be selected.
        self.assertEqual(len(net.select()), 2)

        # Station code patterns, including wildcards.
        for pattern, expected in [("*", 2), ("FUR", 1),
                                  ("FU?", 1), ("W?T", 1)]:
            self.assertEqual(len(net.select(station=pattern)), expected)

        # Time selection: stations come online between 2006 and 2008.
        for year, expected in [(2006, 0), (2007, 1), (2008, 2)]:
            self.assertEqual(
                len(net.select(time=UTCDateTime(year, 1, 1))), expected)
Exemple #21
0
def test_update_user_levels():
    """update_user_levels() must turn every scalar acceptance level in the
    config into a per-sample numpy array of length npts."""
    obs_tr = read(obsfile)[0]
    syn_tr = read(synfile)[0]

    config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml")
    config = wio.load_window_config_yaml(config_file)

    cat = readEvents(quakeml)
    inv = read_inventory(staxml)

    user_module = "pytomo3d.window.tests.user_module_example"
    config = win.update_user_levels(user_module, config, inv, cat,
                                    obs_tr, syn_tr)

    npts = obs_tr.stats.npts
    # Every level attribute must now be an ndarray with one entry per sample.
    for attr in ("stalta_waterlevel", "tshift_acceptance_level",
                 "dlna_acceptance_level", "cc_acceptance_level",
                 "s2n_limit"):
        value = getattr(config, attr)
        assert isinstance(value, np.ndarray)
        assert len(value) == npts
Exemple #22
0
    def test_response_with_no_units_in_stage_1(self):
        """
        ObsPy has some heuristics to deal with this particular degenerate case.
        Test it here.
        """
        inv = read_inventory(os.path.join(
            self.data_dir, "stationxml_no_units_in_stage_1.xml"))
        r = inv[0][0][0].response
        # Sanity check: the fixture file really has no units in stage 1.
        self.assertIsNone(r.response_stages[0].input_units)
        self.assertIsNone(r.response_stages[0].output_units)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            out = r.get_evalresp_response_for_frequencies(
                np.array([0.5, 1.0, 2.0]), output="DISP")

        # Exactly two warnings document the applied unit heuristics.
        self.assertEqual(len(w), 2)
        self.assertEqual(
            w[0].message.args[0],
            "Set the input units of stage 1 to the overall input units.")
        self.assertEqual(
            w[1].message.args[0],
            "Set the output units of stage 1 to the input units of stage 2.")

        # Values compared to evalresp output from RESP file - might not be
        # right but it does guarantee that ObsPy behaves like evalresp - be
        # that a good thing or a bad thing.
        np.testing.assert_allclose(
            out, [0 + 9869.2911771081963j, 0 + 19738.582354216393j,
                  0 + 39477.164708432785j])
Exemple #23
0
    def test_response_calculation_from_resp_files(self):
        """
        Test the response calculations with the obspy.core interface.

        Compares with directly calling evalresp.
        """
        # Very broad range but the responses should be exactly identical as
        # they use the same code under the hood so it should prove no issue.
        frequencies = np.logspace(-3, 3, 20)

        for filename in self.resp_files:
            # Set the times for the response.
            # Some files only have valid epochs at specific dates - pick a
            # date inside a valid epoch for each of those.
            t = obspy.UTCDateTime(2008, 1, 1)
            if "AZ.DHL..BS1" in filename:
                t = obspy.UTCDateTime(1999, julday=351)
            elif "BK.DANT.00.LCL" in filename:
                t = obspy.UTCDateTime(2017, 1, 1)
            elif "BN.WR0..SHZ" in filename:
                t = obspy.UTCDateTime(1998, 1, 1)

            for unit in ("DISP", "VEL", "ACC"):
                r = obspy.read_inventory(filename)[0][0][0].response
                e_r = evalresp_for_frequencies(
                    t_samp=None, frequencies=frequencies, filename=filename,
                    date=t, units=unit)
                i_r = r.get_evalresp_response_for_frequencies(
                    frequencies=frequencies, output=unit)
                # This is in general very dangerous for floating point numbers
                # but they use exactly the same code under the hood here so it
                # is okay - if we ever have our own response calculation code
                # this will have to be changed.
                np.testing.assert_equal(e_r, i_r, "%s - %s" % (filename, unit))
Exemple #24
0
 def test_write_inventory_shapefile_via_plugin(self):
     """
     Writing an inventory through the "SHAPEFILE" plugin creates all
     expected files with the expected fields and records.
     """
     inv = read_inventory()
     with TemporaryWorkingDirectory():
         with warnings.catch_warnings(record=True) as w:
             warnings.filterwarnings('always')
             inv.write("inventory.shp", "SHAPEFILE")
         # for/else: scan recorded warnings for the pyshp version warning;
         # older pyshp versions must have emitted it, others are ignored.
         for w_ in w:
             try:
                 self.assertEqual(
                     str(w_.message), PYSHP_VERSION_WARNING)
             except AssertionError:
                 continue
             break
         else:
             if not PYSHP_VERSION_AT_LEAST_1_2_11:
                 raise AssertionError('pyshape version warning not shown')
         # Every shapefile component (.shp/.shx/.dbf/...) must exist.
         for suffix in SHAPEFILE_SUFFIXES:
             self.assertTrue(os.path.isfile("inventory" + suffix))
         with open("inventory.shp", "rb") as fh_shp, \
                 open("inventory.dbf", "rb") as fh_dbf, \
                 open("inventory.shx", "rb") as fh_shx:
             shp = shapefile.Reader(shp=fh_shp, shx=fh_shx, dbf=fh_dbf)
             # check contents of shapefile that we just wrote
             _assert_records_and_fields(
                 got_fields=shp.fields, got_records=shp.records(),
                 expected_fields=expected_inventory_fields,
                 expected_records=expected_inventory_records)
             self.assertEqual(shp.shapeType, shapefile.POINT)
             _close_shapefile_reader(shp)
    def test_read_and_write_full_file(self):
        """
        Test that reading and writing of a full StationXML document with all
        possible tags works.
        """
        filename = os.path.join(self.data_dir, "full_random_stationxml.xml")
        inv = obspy.read_inventory(filename)

        # Write it again. Also validate it to get more confidence. Suppress the
        # writing of the ObsPy related tags to ease testing.
        file_buffer = io.BytesIO()

        # XXX helper variable to debug writing the full random file, set True
        # XXX for debug output
        write_debug_output = False

        inv.write(file_buffer, format="StationXML",
                  validate=(not write_debug_output),
                  _suppress_module_tags=True)
        file_buffer.seek(0, 0)

        if write_debug_output:
            # Dump the serialized document to disk for manual inspection.
            with open("/tmp/debugout.xml", "wb") as open_file:
                open_file.write(file_buffer.read())
            file_buffer.seek(0, 0)

        # Round-trip comparison against the original document bytes.
        with open(filename, "rb") as open_file:
            expected_xml_file_buffer = io.BytesIO(open_file.read())
        expected_xml_file_buffer.seek(0, 0)

        self._assert_station_xml_equality(file_buffer,
                                          expected_xml_file_buffer)
Exemple #26
0
 def test_len(self):
     """
     Tests the __len__ property.
     """
     inventory = read_inventory()
     # __len__ mirrors the number of networks (2 in the default inventory).
     self.assertEqual(len(inventory), len(inventory.networks))
     self.assertEqual(len(inventory), 2)
Exemple #27
0
    def test_reading_and_writing_full_root_tag(self):
        """
        Tests reading and writing a full StationXML root tag.
        """
        filename = os.path.join(
            self.data_dir,
            "minimal_with_non_obspy_module_and_sender_tags_station.xml")
        inv = obspy.read_inventory(filename)

        # Root-level attributes parsed from the document.
        for got, expected in [
                (inv.source, "OBS"),
                (inv.created, obspy.UTCDateTime(2013, 1, 1)),
                (len(inv.networks), 1),
                (inv.networks[0].code, "PY"),
                (inv.module, "Some Random Module"),
                (inv.module_uri, "http://www.some-random.site"),
                (inv.sender, "The ObsPy Team")]:
            self.assertEqual(got, expected)

        # Write it again. Do not write the module tags.
        file_buffer = io.BytesIO()
        inv.write(file_buffer, format="StationXML", validate=True,
                  _suppress_module_tags=True)
        file_buffer.seek(0, 0)

        with open(filename, "rb") as open_file:
            expected_xml_file_buffer = io.BytesIO(open_file.read())
        expected_xml_file_buffer.seek(0, 0)

        self._assert_station_xml_equality(
            file_buffer, expected_xml_file_buffer)
Exemple #28
0
    def test_stationxml_with_availability(self):
        """
        A variant of StationXML has support for availability information.
        Make sure this works.
        """
        filename = os.path.join(self.data_dir,
                                "stationxml_with_availability.xml")
        inv = obspy.read_inventory(filename, format="stationxml")
        channel = inv[0][0][0]
        # The availability element must survive parsing.
        self.assertEqual(channel.data_availability.start,
                         obspy.UTCDateTime("1998-10-26T20:35:58"))
        self.assertEqual(channel.data_availability.end,
                         obspy.UTCDateTime("2014-07-21T12:00:00"))

        # Now write it again and compare to the original file.
        file_buffer = io.BytesIO()
        inv.write(file_buffer, format="StationXML",
                  _suppress_module_tags=True)
        file_buffer.seek(0, 0)

        with open(filename, "rb") as open_file:
            expected_xml_file_buffer = io.BytesIO(open_file.read())
        expected_xml_file_buffer.seek(0, 0)

        self._assert_station_xml_equality(file_buffer,
                                          expected_xml_file_buffer)
Exemple #29
0
    def test_read_and_write_minimal_file(self):
        """
        Test that writing the most basic StationXML document possible works.
        """
        filename = os.path.join(self.data_dir, "minimal_station.xml")
        inv = obspy.read_inventory(filename)

        # Assert the few values that are set directly.
        for got, expected in [(inv.source, "OBS"),
                              (inv.created, obspy.UTCDateTime(2013, 1, 1)),
                              (len(inv.networks), 1),
                              (inv.networks[0].code, "PY")]:
            self.assertEqual(got, expected)

        # Round-trip: write (validating for extra confidence, suppressing
        # the ObsPy module tags) and compare with the original document.
        file_buffer = io.BytesIO()
        inv.write(file_buffer, format="StationXML", validate=True,
                  _suppress_module_tags=True)
        file_buffer.seek(0, 0)

        with open(filename, "rb") as open_file:
            expected_xml_file_buffer = io.BytesIO(open_file.read())
        expected_xml_file_buffer.seek(0, 0)

        self._assert_station_xml_equality(file_buffer,
                                          expected_xml_file_buffer)
def get_stationxml_inventories(stationxml_dir, verbose=False):
    """
    Reads inventories in all StationXML (*.xml) files
    of specified dir.

    Fixed to use the Python 3 print() function (the original used
    Python 2 print statements, a syntax error under Python 3).

    :param stationxml_dir: directory containing the StationXML files
    :type stationxml_dir: unicode or str
    :param verbose: if True, print progress information
    :type verbose: bool
    :rtype: list of L{obspy.station.inventory.Inventory}
    """
    inventories = []

    # list of *.xml files (empty if the directory does not exist)
    flist = glob.glob(pathname=os.path.join(stationxml_dir, "*.xml"))

    if verbose:
        if flist:
            # end=' ' mimics the old trailing-comma print statement
            print("Reading inventory in StationXML file:", end=' ')
        else:
            s = u"Could not find any StationXML file (*.xml) in dir: {}!"
            print(s.format(stationxml_dir))

    for f in flist:
        if verbose:
            print(os.path.basename(f), end=' ')
        inv = read_inventory(f, format='stationxml')
        inventories.append(inv)

    if flist and verbose:
        print()

    return inventories
Exemple #31
0
    def open_xml_file(self):
        """
        Prompt the user for a StationXML file, read it into ``self.inv``
        and refresh the channel list, the inventory plot and the station
        view.
        """
        self.stn_filename = str(
            QtGui.QFileDialog.getOpenFileName(
                parent=self,
                caption="Choose StationXML Metadata File",
                directory=os.path.expanduser("~"),
                filter="XML Files (*.xml)"))
        # User cancelled the file dialog - nothing to do.
        if not self.stn_filename:
            return

        self.inv = read_inventory(self.stn_filename)

        print('')
        print(self.inv)

        # Channel codes of the first station of the first network.
        # (Replaces the original's redundant enumerate + re-indexing loop.)
        self.channel_codes = [chan.code for chan in self.inv[0][0]]

        self.plot_inv()

        self.build_station_view_list()
Exemple #32
0
    def test_response_plot(self):
        """
        Tests the response plot.
        """
        # Matplotlib 1.4.x needs a looser image tolerance, see
        # https://github.com/matplotlib/matplotlib/issues/4012
        if [1, 4, 0] <= MATPLOTLIB_VERSION <= [1, 5, 0]:
            reltol = 2.0
        else:
            reltol = 1.0

        net = read_inventory()[0]
        when = UTCDateTime(2008, 7, 1)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore")
            with ImageComparison(self.image_dir, "network_response.png",
                                 reltol=reltol) as ic:
                rcParams['savefig.dpi'] = 72
                net.plot_response(0.002, output="DISP", channel="B*E",
                                  time=when, outfile=ic.name)
Exemple #33
0
    def add_inv(self, input, unit):
        """
        Attach response metadata to this object as ``self.inv``.

        :param input: 'staxml' to read a single StationXML inventory
            (named <net>.<sta>.xml under meta/stationxml), or 'resp' to
            build a dict mapping each seed id to its RESP file reference.
        :param unit: output units stored alongside each RESP file
            (only used when input == 'resp').
        :raises ValueError: if input is neither 'resp' nor 'staxml'.
        """
        if input == 'staxml':
            # All ids share one inventory file keyed on network.station.
            net_sta = self.ids[0].split('.')[0:2]
            xml_path = os.path.join(
                'meta', 'stationxml', '{}.{}.xml'.format(*net_sta))
            self.inv = read_inventory(xml_path)

        elif input == 'resp':
            # One RESP file reference per seed id. Locals renamed to avoid
            # shadowing the builtins `id` and `file`.
            self.inv = {
                seed_id: {
                    'filename': os.path.join(
                        'meta', 'resp',
                        'RESP.{}.{}.{}.{}'.format(*seed_id.split('.'))),
                    'units': unit,
                }
                for seed_id in self.ids
            }

        else:
            msg = 'input must be \'resp\' or \'staxml\''
            raise ValueError(msg)
Exemple #34
0
    def test_response_plot_degrees(self):
        """
        Tests the response plot in degrees.
        """
        # Matplotlib 1.4.x needs a looser image tolerance, see
        # https://github.com/matplotlib/matplotlib/issues/4012
        reltol = 2.0 if [1, 4, 0] <= MATPLOTLIB_VERSION <= [1, 5, 0] else 1.0

        resp = read_inventory()[0][0][0].response
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore")
            with ImageComparison(self.image_dir,
                                 "response_response_degrees.png",
                                 reltol=reltol) as ic:
                rcParams['savefig.dpi'] = 72
                resp.plot(0.001, output="VEL", start_stage=1, end_stage=3,
                          plot_degrees=True, outfile=ic.name)
Exemple #35
0
 def test_wrong_trace_id_message(self):
     """
     Test that we get the expected warning message on waveform/metadata
     mismatch.
     """
     # Sample trace plus an inventory for station IU.ANMO, whose metadata
     # cannot match the sample trace's id.
     tr, _paz = _get_sample_data()
     inv = read_inventory(os.path.join(self.path, 'IUANMO.xml'))
     st = Stream([tr])
     ppsd = PPSD(tr.stats, inv)
     # metadata doesn't fit the trace ID specified via stats
     # should show a warning..
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
         ret = ppsd.add(st)
         # the trace is sliced into four segments, so we get the warning
         # message four times..
         self.assertEqual(len(w), 4)
         for w_ in w:
             self.assertTrue(
                 str(w_.message).startswith(
                     "Error getting response from provided metadata"))
     # should not add the data to the ppsd
     self.assertFalse(ret)
Exemple #36
0
def test_window_on_trace(tmpdir):
    """Select windows on the radial component and write them out as both
    txt and json files."""
    observed = read(obsfile).select(channel="*R")[0]
    synthetic = read(synfile).select(channel="*R")[0]

    cfg_path = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml")
    config = win.load_window_config_yaml(cfg_path)

    cat = readEvents(quakeml)
    inv = read_inventory(staxml)

    windows = win.window_on_trace(observed, synthetic, config,
                                  station=inv, event=cat,
                                  _verbose=False, figure_mode=False)

    # Dump the selected windows in both supported formats.
    wrw.write_txtfile(windows, os.path.join(str(tmpdir), "window.txt"))
    wrw.write_jsonfile(windows, os.path.join(str(tmpdir), "window.json"))
Exemple #37
0
def analyze_noise(data_files, response, decimateby=5):
    """
    Run through data files and accumulate a PPSD object using obspy.

    :param data_files: list of waveform file paths (sorted in place)
    :param response: path to a response file readable by read_inventory
    :param decimateby: decimation factor for each stream (0 disables it)
    :raises ValueError: if data_files is empty (previously a cryptic
        IndexError on data_files[0])
    :return: PPSD accumulated over all readable files
    """
    if not data_files:
        raise ValueError("data_files must contain at least one file")
    data_files.sort()
    print("++ {} data files".format(len(data_files)))
    inv = read_inventory(response)
    # Initialize the PPSD with the first data file.
    print("1/{} Initializing with data file: ".format(len(data_files)),
          os.path.basename(data_files[0]), end='... ')
    start = time.time()
    st = read(data_files[0])
    if decimateby != 0:
        st.decimate(decimateby)
    ppsd = PPSD(st[0].stats, metadata=inv)
    ppsd.add(st)
    end = time.time()
    print("complete ({}s)".format(round(end - start, 2)))

    # Loop over the remaining files and add each to the PPSD. A single
    # unreadable file is reported but does not abort the whole run.
    for i, filename in enumerate(data_files[1:]):
        print('{0}/{1} {2}'.format(i + 2, len(data_files),
                                   os.path.basename(filename)), end='... ')
        try:
            start = time.time()
            st = read(filename)
            if decimateby != 0:
                st.decimate(decimateby)
            ppsd.add(st)
            end = time.time()
            print("complete ({}s)".format(round(end - start, 2)))
        except Exception as e:
            # Best effort: report the failure and continue.
            print(e)

    return ppsd
Exemple #38
0
    def test_numbers_are_written_to_poles_and_zeros(self):
        """
        Poles and zeros have a number attribute. Make sure this is written,
        even if set with a custom complex list.
        """
        # Read default inventory and cut down to a single channel.
        inv = obspy.read_inventory()
        inv.networks = inv[:1]
        inv[0].stations = inv[0][:1]
        inv[0][0].channels = inv[0][0][:1]

        # Manually set the poles and zeros - thus these are cast to our
        # custom classes but number are not yet set.
        inv[0][0][0].response.response_stages[0].poles = [0 + 1j, 2 + 3j]
        inv[0][0][0].response.response_stages[0].zeros = [0 + 1j, 2 + 3j]

        with io.BytesIO() as buf:
            inv.write(buf, format="stationxml", validate=True)
            buf.seek(0, 0)
            data = buf.read().decode()

        # Ugly test - remove all whitespace and make sure the four following
        # lines are part of the written output.
        # Raw string fixes the invalid '\s' escape of the original literal.
        data = re.sub(r'\s+', ' ', data)

        self.assertIn(
            '<Zero number="0"> <Real>0.0</Real> '
            '<Imaginary>1.0</Imaginary> </Zero>', data)
        self.assertIn(
            '<Zero number="1"> <Real>2.0</Real> '
            '<Imaginary>3.0</Imaginary> </Zero>', data)
        self.assertIn(
            '<Pole number="0"> <Real>0.0</Real> '
            '<Imaginary>1.0</Imaginary> </Pole>', data)
        self.assertIn(
            '<Pole number="1"> <Real>2.0</Real> '
            '<Imaginary>3.0</Imaginary> </Pole>', data)
Exemple #39
0
    def test_response_list_stage(self):
        """
        This is quite rare but it happens.
        """
        inv = read_inventory(os.path.join(self.data_dir, "IM_IL31__BHZ.xml"))

        sampling_rate = 40.0
        nfft = 100

        cpx, freqs = inv[0][0][0].response.get_evalresp_response(
            t_samp=1.0 / sampling_rate, nfft=nfft, output="VEL",
            start_stage=None, end_stage=None)

        # Drop the zero-frequency sample before comparing.
        cpx = cpx[1:]
        freqs = freqs[1:]

        amp = np.abs(cpx)
        phase = np.angle(cpx)

        # The expected output goes from 1 to 20 Hz - its somehow really hard
        # to get evalresp to produce results for the desired frequencies so
        # I just gave up on it. Interpolate onto our frequency axis instead.
        exp_f, exp_amp, exp_ph = np.loadtxt(os.path.join(
            self.data_dir, "expected_response_IM_IL31__BHZ.txt")).T
        exp_amp = scipy.interpolate.InterpolatedUnivariateSpline(
            exp_f, exp_amp, k=3)(freqs)
        exp_ph = np.deg2rad(scipy.interpolate.InterpolatedUnivariateSpline(
            exp_f, exp_ph, k=3)(freqs))

        # The output is not exactly the same as ObsPy performs a different
        # but visually quite a bit better interpolation.
        np.testing.assert_allclose(amp, exp_amp, rtol=1E-3)
        np.testing.assert_allclose(phase, exp_ph, rtol=1E-3)
Exemple #40
0
    def test_read_and_write_full_file(self):
        """
        Test that reading and writing of a full StationXML document with all
        possible tags works.
        """
        path = os.path.join(self.data_dir, "full_random_stationxml.xml")
        inv = obspy.read_inventory(path)

        # Serialize again with validation enabled; the ObsPy module tags are
        # suppressed so the output stays comparable to the input document.
        written = io.BytesIO()
        inv.write(written, format="StationXML", validate=True,
                  _suppress_module_tags=True)
        written.seek(0, 0)

        with open(path, "rb") as fh:
            expected = io.BytesIO(fh.read())
        expected.seek(0, 0)

        self._assert_station_xml_equality(written, expected)
Exemple #41
0
 def test_preprocess(self):
     """
     Preprocess a three-trace example stream and check decimation,
     1-bit normalization and gap masking.
     """
     # Build a synthetic day-long dataset: three traces starting at the
     # same day, two of them relabeled with different ids/sampling rates.
     stream = read()
     day = UTC('2018-01-02')
     for tr in stream:
         tr.stats.starttime = day
     tr = stream[1]
     tr.id = 'GR.FUR..BH' + tr.stats.channel[-1]
     tr.stats.sampling_rate = 80.
     tr = stream[2]
     tr.id = 'GR.WET..BH' + tr.stats.channel[-1]
     tr.stats.sampling_rate = 50.
     # Punch two gaps into the data so masked arrays are exercised.
     stream = stream.cutout(day + 0.01, day + 10)
     stream = stream.cutout(day + 14, day + 16.05)
     norm = ('clip', 'spectral_whitening', 'mute_envelope', '1bit')
     # see https://docs.scipy.org/doc/numpy-1.13.0/release.html#
     # assigning-to-slices-views-of-maskedarray
     ignore_msg = r'setting an item on a masked array which has a shared'
     with np.warnings.catch_warnings():
         np.warnings.filterwarnings('ignore', ignore_msg)
         preprocess(stream,
                    day=day,
                    inventory=read_inventory(),
                    remove_response=True,
                    filter=None,
                    normalization=norm,
                    time_norm_options=None,
                    spectral_whitening_options=None,
                    decimate=5)
     # Every trace should end up at 10 Hz after preprocessing.
     for tr in stream:
         self.assertEqual(tr.stats.sampling_rate, 10)
     # 1-bit normalization leaves only {-1, 0, 1}; gap samples stay masked
     # and their underlying data is zero.
     for tr in stream:
         self.assertEqual(set(tr.data._data), {-1, 0, 1})
         mask = np.ma.getmask(tr.data)
         np.testing.assert_equal(tr.data[mask]._data, 0)
         self.assertGreater(np.count_nonzero(mask), 0)
     self.assertEqual(len(stream), 3)
Exemple #42
0
    def test_response_plot(self):
        """
        Tests the response plot.
        """
        # Matplotlib 1.4.0 - 1.4.2 needs a looser image tolerance, see
        # https://github.com/matplotlib/matplotlib/issues/4012
        reltol = 2.0 if [1, 4, 0] <= MATPLOTLIB_VERSION <= [1, 4, 2] else 1.0

        inv = read_inventory()
        when = UTCDateTime(2008, 7, 1)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore")
            with ImageComparison(self.image_dir, "inventory_response.png",
                                 reltol=reltol) as ic:
                rcParams['savefig.dpi'] = 72
                inv.plot_response(0.01, output="ACC", channel="*N",
                                  station="[WR]*", time=when,
                                  outfile=ic.name)
Exemple #43
0
    def test_subsecond_read_and_write_minimal_file(self):
        """
        Test reading and writing of sub-second time in datetime field,
        using creation time

        """
        path = os.path.join(self.data_dir,
                            "minimal_station_with_microseconds.xml")
        inv = obspy.read_inventory(path)

        # Round-trip with validation; the ObsPy module tags are suppressed
        # so the output stays comparable to the input document.
        written = io.BytesIO()
        inv.write(written, format="StationXML", validate=True,
                  _suppress_module_tags=True)
        written.seek(0, 0)

        with open(path, "rb") as fh:
            expected = io.BytesIO(fh.read())
        expected.seek(0, 0)

        self._assert_station_xml_equality(written, expected)
Exemple #44
0
    def test_inventory_select_with_empty_networks(self):
        """
        Tests the behaviour of the Inventory.select() method with empty
        Network objects.
        """
        inv = read_inventory()

        # Strip all stations from every network.
        for network in inv:
            network.stations = []

        self.assertEqual(len(inv), 2)
        self.assertEqual(sum(len(network) for network in inv), 0)

        # With no selection criteria nothing is filtered out.
        self.assertEqual(len(inv), 2)
        # A match-everything wildcard behaves the same.
        self.assertEqual(len(inv.select(network="*")), 2)
        # Patterns matching a single network keep just that one.
        self.assertEqual(len(inv.select(network="BW")), 1)
        self.assertEqual(len(inv.select(network="G?")), 1)
        # Only a pattern matching nothing yields an empty inventory.
        self.assertEqual(len(inv.select(network="RR")), 0)
Exemple #45
0
def get_data_OVPF(cfg, starttime, window_length):
    """
    Fetch vertical-component waveforms for all configured OVPF stations.

    Station BOR is requested with simulate=True using the dataless
    response parser; every other station uses the StationXML inventory.
    """
    inv = read_inventory(cfg.response_fname)
    parser = Parser(cfg.BOR_response_fname)

    st = Stream()
    for sta in cfg.station_names:
        if sta == 'BOR':
            chunk = io.get_waveform_data(starttime, window_length, 'PF',
                                         sta, '??Z', parser, simulate=True)
        else:
            chunk = io.get_waveform_data(starttime, window_length, 'PF',
                                         sta, '??Z', inv)
        # Skip stations for which no data came back.
        if chunk is not None:
            st += chunk

    return st
Exemple #46
0
def beacon_waveforms(station_name, start, end, **kwargs):
    """
    Get beacon station waveforms based on station name.

    :param station_name: station code, substituted into the XX network
        file-name pattern
    :param start: UTCDateTime-like start of the requested window
    :param end: UTCDateTime-like end of the requested window
    :keyword path: directory template formatted with ``year`` and ``sta``
    :keyword inv_path: path to the StationXML file with the responses
    :return: obspy Stream trimmed to [start, end] with responses attached
    """
    path = kwargs.get("path", None)
    inv_path = kwargs.get("inv_path", None)

    # SEED-style file pattern: NET.STA.LOC.CHA.D.YEAR.JDAY
    code = f"XX.{station_name}.10.HH?.D.{start.year}.{start.julday:0>3}"

    # The original unpacked all seven dotted fields into locals but only
    # used the station code, which equals station_name anyway.
    path = path.format(year=start.year, sta=station_name)
    st = Stream()
    for fid in glob.glob(os.path.join(path, code)):
        st += read(fid)

    st.trim(start, end)
    inv = read_inventory(inv_path)

    # Will only attach the relevant response
    st.attach_response(inv)

    return st
Exemple #47
0
def freddy_sac_convert(root_dir, outdir, inv_file):
    """
    Function to do mseed to sac convertion on fede's machine
    :param root_dir: root directory path (day_volumes)
    :param outdir: path to output directory for sac files
    :param inv_file: path to stationxml metadata file
    :return:
    """
    # Read inventory from StationXML. This will recognize the format automatically
    inv = read_inventory(inv_file)
    # Walk the tree; the last path element of each visited directory is
    # treated as the station code.
    # NOTE(review): os.walk also yields root_dir itself, so its basename is
    # treated as a station too - confirm the directory layout makes this
    # harmless (no loose files directly in root_dir).
    for root, dirs, wav_files in os.walk(root_dir):
        sta = root.split('/')[-1]
        # Check if a subdirectory exists with this station name
        # in the output directory. If it doesn't, make one.
        # This just mimics your original structure. Change it if you want.
        if not os.path.exists('{}/{}'.format(outdir, sta)):
            os.mkdir('{}/{}'.format(outdir, sta))
        # Loop through each of those files
        for wav_file in wav_files:
            # Split the file name on '.' so its parts can be reused when
            # building the output file name below.
            name_parts = wav_file.split('.')
            # Read into Stream, merge, take first (only) trace
            tr = read(os.path.join(
                root, wav_file)).merge(fill_value='interpolate')[0]
            tr = attach_sac_header(tr, inv)
            chan = tr.stats.channel
            # So now we've got the trace with all of the necessary
            # header info. We'll create the filename first, then save it
            out_path = '{}/{}/{}.{}.{}.{}.sac'.format(outdir, sta,
                                                      name_parts[-2],
                                                      name_parts[-1], sta,
                                                      chan)
            tr.write(out_path, format='SAC')
def read_waveform(waveform_dir,
                  t_ref,
                  stat,
                  net,
                  baz,
                  channels=['BHU', 'BHV', 'BHW'],
                  location='03'):
    """
    Read, instrument-correct, differentiate and rotate waveforms around a
    reference time.

    Relies on module-level globals: ``t_pre``/``t_post`` (window padding
    in seconds), ``fmin`` (pre-filter corner frequency) and ``pjoin``
    (presumably os.path.join) -- TODO confirm these are defined at module
    scope.

    :param waveform_dir: root directory holding <channel>.D day files
    :param t_ref: reference UTCDateTime; window is [t_ref-t_pre, t_ref+t_post]
    :param stat: station code
    :param net: network code
    :param baz: back-azimuth in degrees for the NE->RT rotation
    :param channels: raw channel codes to read
        (NOTE(review): mutable default argument; never mutated here)
    :param location: SEED location code of the channels
    :return: Stream rotated to ZRT components
    """
    st = Stream()
    inv = read_inventory('inventory.xml')
    t_end = t_ref + t_post
    t_start = t_ref - t_pre
    for channel in channels:
        # Day-file name: NET.STA.LOC.CHA.D.YEAR.JDAY
        fnam = (pjoin(
            waveform_dir, channel + '.D', '%s.%s.%s.%s.D.%04d.%03d' %
            (net, stat, location, channel, t_ref.year, t_ref.julday)))
        # Read with an hour of padding on both sides for stable filtering.
        st += read(fnam, starttime=t_start - 3600, endtime=t_end + 3600)
        # fnam = (pjoin(waveform_dir,
        #               channel+'.D',
        #               '%s.%s.%s.%s.D.%04d.%03d' %
        #               (net, stat, location, channel, t_ref.year, t_ref.julday+1)))
        # st += read(fnam, starttime=t_start-3600, endtime=t_end+3600)
    st.merge()
    st.remove_response(inv, pre_filt=(fmin * 0.8, fmin, 1. / 1.5, 1. / 2))
    st.differentiate()
    #st.filter('lowpass', freq=1. / 2., zerophase=True)
    #st.filter('highpass', freq=fmin, zerophase=True)
    st.trim(starttime=t_start, endtime=t_end)
    # Rotate the raw U/V/W orientations to ZNE, then NE to RT.
    #st_ZNE = st.rotate(method='->ZNE', inventory=inv)
    st_ZNE = st._rotate_specific_channels_to_zne(
        network=net,
        station=stat,
        location='03',
        channels=['BHU', 'BHV', 'BHW'],
        inventory=inv)
    st_ZRT = st_ZNE.rotate(method='NE->RT', back_azimuth=baz)
    return st_ZRT
Exemple #49
0
def test_dataframe_to_inventory(iris_mocker, tmp_path):
    """Test conversion of a Pandas dataframe representation conforming to
    TABLE_SCHEMA back to station XML file.
    """
    outfile = str(tmp_path / 'df_to_inv.xml')
    assert not os.path.exists(outfile)

    # Build the test dataframe by round-tripping a minimal FDSN response
    # through inventory_to_dataframe().
    with io.BytesIO(iris_mocker.get_minimal_response().encode('utf-8')) as buf:
        obspy_inv = read_inventory(buf)
        test_df = inventory_to_dataframe(obspy_inv)
        expected_query = ("https://service.iris.edu/fdsnws/station/1/query?"
                          "net=GE&sta=*&cha=*HZ&level=response"
                          "&format=xml&includerestricted=false"
                          "&includecomments=false&nodata=404")
        iris_mocker.get(expected_query, text=iris_mocker.get_minimal_response())
        instruments = extract_unique_sensors_responses(
            obspy_inv, requests, show_progress=True, test_mode=True)
        dataframe_to_fdsn_station_xml(test_df, instruments, outfile,
                                      show_progress=True)
    assert os.path.exists(outfile)

    # Each expected code should appear exactly once, and the channel must
    # carry an instrument response.
    with open(outfile, 'r') as f:
        contents = f.read()
    for needle in ('<Network code="GE"', '<Station code="MAHO"',
                   '<Channel code="BHZ"', '<Response>'):
        assert contents.count(needle) == 1
    os.remove(outfile)
Exemple #50
0
    def inventory(self):
        """
        def SDSFile::inventory
        Returns the FDSNWSXML inventory
        """
        # Serve the cached copy when one is available.
        if self._inventory is not None:
            return self._inventory

        # Ask the configured FDSNWS webservice for the station metadata.
        request = os.path.join(self.fdsnws, self.query_string_xml)

        try:
            self._inventory = read_inventory(request)
        except TimeoutError:
            # Re-raise in case this is the Rule Manager timeout going off
            raise
        except Exception:
            # Any other transport/parsing failure yields no inventory.
            return None

        return self._inventory
Exemple #51
0
def main():
    """Command-line entry point: dump a StationXML file to a txt file."""
    parser = argparse.ArgumentParser(
        prog=progname,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='dump a stationxml file to txt file')
    parser.add_argument("-i", "--staxml", type=str, required=True,
                        help="Name of the station xml file")
    parser.add_argument("-o", "--output", type=str, required=True,
                        help="output txt file")
    parser.add_argument("-v", "--verbose", action="count", default=0,
                        help="increase spewage")
    parser.add_argument('--version', action='version',
                        version='%(prog)s {version}'.format(
                            version=__version__))

    args = parser.parse_args()

    # Read the inventory and write the plain-text dump.
    inv = read_inventory(args.staxml)
    dump_output(inv, args.output, args.verbose)
Exemple #52
0
    def test_segfault_after_error_handling(self):
        """
        Many functions in evalresp call `error_return()` which uses longjmp()
        to jump to some previously set state.

        ObsPy calls some evalresp functions directly so evalresp cannot call
        setjmp(). In that case longjmp() jumps to an undefined location, most
        likely resulting in a segfault.

        This test tests a workaround for this issue.

        As long as it does not segfault the test is doing alright.
        """
        inv = read_inventory(os.path.join(
            self.data_dir, "TM.SKLT.__.BHZ_faulty_response.xml"))
        resp = inv[0][0][0].response

        # Evaluating the faulty response must raise cleanly rather than
        # longjmp into undefined territory.
        with CatchOutput():
            with pytest.raises(ValueError):
                resp.get_evalresp_response(0.05, 256, output="DISP")
Exemple #53
0
def main():
    """
    Export day-long MiniSEED chunks from an ASDF archive, first clamping
    extreme spike samples (|value| > 1e8) to the trace median.
    """
    base_dir = '/g/data/ha3/Passive/_ANU/7B(1993-1995)'
    asdf_file = os.path.join(base_dir, 'ASDF', '7B(1993-1995).h5')
    out_dir = os.path.join(base_dir, 'small_mseed_DATA')

    inv = read_inventory(os.path.join(base_dir, '7B.xml'))
    asdf = ASDFDataSet(asdf_file, mode='r')

    for sta in inv.networks[0].stations:
        if asdf.waveforms.__contains__(inv.networks[0].code + '.' + sta.code):
            for i in asdf.waveforms[inv.networks[0].code + '.' +
                                    sta.code].list():
                # Tags look like ...__<start>__<end>__raw_recording; only
                # process raw recordings.
                if i.endswith('raw_recording'):
                    start_time = UTC(i.split("__")[1])
                    st = asdf.waveforms[inv.networks[0].code + '.' +
                                        sta.code][i]
                    medn = np.median(st[0].data)
                    # Despike: repeatedly replace the current extreme sample
                    # with +/-|median| (sign-preserving) until no sample
                    # exceeds 1e8 in magnitude.
                    while (abs(st[0].data[np.argmax(st[0].data)]) > 1e8
                           or abs(st[0].data[np.argmin(st[0].data)]) > 1e8):
                        if abs(st[0].data[np.argmax(st[0].data)]) > 1e8:
                            st[0].data[np.argmax(
                                st[0].data)] = abs(medn) if st[0].data[
                                    np.argmax(st[0].data)] > 0 else -abs(medn)
                        if abs(st[0].data[np.argmin(st[0].data)]) > 1e8:
                            st[0].data[np.argmin(
                                st[0].data)] = abs(medn) if st[0].data[
                                    np.argmin(st[0].data)] > 0 else -abs(medn)
                    # Slice the recording into 86400 s (one-day) chunks.
                    while (start_time + 86400 < UTC(i.split("__")[2])):
                        tr = st[0].copy()
                        create_chunk(out_dir, tr, start_time,
                                     start_time + 86400, sta)
                        start_time += 86400
                    # Final remainder shorter than a full day.
                    if start_time < UTC(i.split("__")[2]):
                        tr = st[0].copy()
                        create_chunk(out_dir, tr, start_time,
                                     UTC(i.split("__")[2]), sta)
Exemple #54
0
def test_window_on_trace():
    """Compare windows selected on the radial component against the stored
    benchmark JSON file."""
    observed = read(obsfile).select(channel="*R")[0]
    synthetic = read(synfile).select(channel="*R")[0]

    config = win.load_window_config_yaml(
        os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml"))
    cat = readEvents(quakeml)
    inv = read_inventory(staxml)

    windows = win.window_on_trace(observed, synthetic, config,
                                  station=inv, event=cat,
                                  _verbose=False, figure_mode=False)

    # Every selected window must equal its benchmark counterpart.
    benchmark_file = os.path.join(DATA_DIR, "window",
                                  "IU.KBL..BHR.window.json")
    with open(benchmark_file) as fh:
        benchmark = json.load(fh)
    for actual, expected_json in zip(windows, benchmark):
        assert actual == Window._load_from_json_content(expected_json)
Exemple #55
0
    def test_reading_channel_without_coordinates(self):
        """
        Tests reading a file with an empty channel object. This is strictly
        speaking not valid but we are forgiving.
        """
        filename = os.path.join(self.data_dir,
                                "channel_without_coordinates.xml")
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            inv = obspy.read_inventory(filename)

        # Exactly one warning: the coordinate-less channel is skipped.
        self.assertEqual(len(caught), 1)
        self.assertEqual(
            caught[0].message.args[0],
            "Channel 00.BHZ of station LATE does not have a complete set of "
            "coordinates and thus it cannot be read. It will not be part of "
            "the final inventory object.")

        # Network and station survive; the channel list stays empty.
        self.assertEqual(
            inv.get_contents(),
            {'networks': ['IV'], 'stations': ['IV.LATE (Latera)'],
             'channels': []})
Exemple #56
0
def get_inventory(sender='get_inventory'):
    """
    Fetch the station response inventory for the globally configured
    network/station from the Raspberry Shake FDSN service.

    :param sender: label passed through to printM() log lines. The
        original unconditionally overwrote this parameter, making it
        dead; it is now honored (default preserves old behavior).
    :return: obspy Inventory on success, False otherwise (also stored in
        the global ``inv``)
    """
    global inv, stn
    if 'Z0000' in stn:
        # Placeholder station name - nothing to look up.
        printM('No station name given, continuing without inventory.', sender)
        inv = False
    else:
        try:
            printM(
                'Fetching inventory for station %s.%s from Raspberry Shake FDSN.'
                % (net, stn), sender)

            # Request response-level metadata starting four hours ago.
            inv = read_inventory(
                'https://fdsnws.raspberryshakedata.com/fdsnws/station/1/query?network=%s&station=%s&starttime=%s&level=resp&nodata=404&format=xml'
                %
                (net, stn, str(UTCDateTime.now() - timedelta(seconds=14400))))
            printM('Inventory fetch successful.', sender)
        except (IndexError, HTTPError):
            printM(
                'WARNING: No inventory found for %s. Are you forwarding your Shake data?'
                % stn, sender)
            printM(
                '         Deconvolution will only be available if data forwarding is on.',
                sender)
            printM(
                '         Access the config page of the web front end for details.',
                sender)
            printM(
                '         More info at https://manual.raspberryshake.org/quickstart.html',
                sender)
            inv = False
        except Exception as e:
            printM('ERROR: Inventory fetch failed!', sender)
            printM('       Error detail: %s' % e, sender)
            inv = False
    return inv
Exemple #57
0
    def _extract_index_values_stationxml(filename):
        """
        Reads StationXML files and extracts some keys per channel.

        :param filename: path to the StationXML file
        :raises StationCacheError: if the file cannot be parsed, a channel
            has no response, or the file contains no channels
        :return: list of [channel_id, start_timestamp, end_timestamp,
            latitude, longitude, elevation, depth] entries
        """
        try:
            inv = obspy.read_inventory(filename, format="stationxml")
        except Exception as e:
            # A bare `except:` would also swallow KeyboardInterrupt and
            # SystemExit; chain the cause for easier debugging.
            msg = "Not a valid StationXML file?"
            raise StationCacheError(msg) from e

        channels = []
        for network in inv:
            for station in network:
                for channel in station:
                    channel_id = "%s.%s.%s.%s" % (
                        network.code, station.code, channel.location_code,
                        channel.code)
                    if channel.response is None:
                        msg = "Channel %s has no response." % channel_id
                        raise StationCacheError(msg)
                    # Epoch timestamps; None is kept when a date is unset.
                    start_date = channel.start_date
                    if start_date:
                        start_date = int(start_date.timestamp)
                    end_date = channel.end_date
                    if end_date:
                        end_date = int(end_date.timestamp)
                    channels.append([
                        channel_id, start_date, end_date, channel.latitude,
                        channel.longitude, channel.elevation, channel.depth
                    ])

        if not channels:
            msg = "File has no channels."
            raise StationCacheError(msg)

        return channels
Exemple #58
0
    def _harvest_from_stationxml(self, session, station_xml):
        """
        Create/update Network, Station and ChannelEpoch objects from a
        station.xml file.

        :param :cls:`sqlalchemy.orm.sessionSession` session: SQLAlchemy session
        :param :cls:`io.BinaryIO` station_xml: Station XML file stream
        """
        try:
            inventory = read_inventory(station_xml, format='STATIONXML')
        except Exception as err:
            raise self.StationXMLParsingError(err)

        nets, stas, chas = [], [], []
        for inv_network in inventory.networks:
            self.logger.debug("Processing network: {0!r}".format(inv_network))
            network = self._emerge_network(session, inv_network)
            nets.append(network)

            for inv_station in inv_network.stations:
                self.logger.debug(
                    "Processing station: {0!r}".format(inv_station))
                station = self._emerge_station(session, inv_station)
                stas.append(station)

                for inv_channel in inv_station.channels:
                    self.logger.debug(
                        "Processing channel: {0!r}".format(inv_channel))
                    chas.append(self._emerge_channelepoch(
                        session, inv_channel, network, station))

        return nets, stas, chas
Exemple #59
0
def inv():
    """
    StationXML information for station NZ.BFZ.HH?
    """
    inventory = read_inventory("./test_data/test_dataless_NZ_BFZ.xml")
    return inventory
Exemple #60
0
from jinja2 import Environment, FileSystemLoader, select_autoescape
from obspy import read_inventory, UTCDateTime
from weasyprint import HTML

if __name__ == "__main__":
    env = Environment(
        loader=FileSystemLoader("./"),
        autoescape=select_autoescape(['html', 'htm', 'xml'])
    )
    template = env.get_template('phase1.htm')
    # get some data to render
    inv = read_inventory("inventory.xml")
    startdate = UTCDateTime(2018,5,1)
    enddate = UTCDateTime(2018,5,2)
    # define which channel codes denote acceptable seismic channels
    SEISMIC_CHANNELS = ['BHE', 'BHN', 'BHZ', 'HHE', 'HHN', 'HHZ',
                 'BH1', 'BH2', 'BH3', 'HH1', 'HH2', 'HH3',
                 'ENE', 'ENN', 'ENZ', 'HNE', 'HNN', 'HNZ',
                 'EN1', 'EN2', 'EN3', 'HN1', 'HN2', 'HN3']

    for network in inv:
        for station in network:
            channel_metrics = {}
            for chan in station:
                 if chan.code in SEISMIC_CHANNELS:
                     if chan.location_code == "":
                         loc = "--"
                     else:
                         loc = chan.location_code
                     nslc = ".".join([network.code,station.code,chan.code,loc])
                     metrics = {