Example #1
 def test_deepcopy(self):
     """
     Tests deepcopy method of Stats object.
     """
     stats = Stats()
     stats.network = "BW"
     stats["station"] = "ROTZ"
     stats["other1"] = {"test1": "1"}
     stats["other2"] = AttribDict({"test2": "2"})
     stats["other3"] = "test3"
     stats2 = copy.deepcopy(stats)
     stats.network = "CZ"
     stats.station = "RJOB"
     self.assertEqual(stats2.__class__, Stats)
     self.assertEqual(stats2.network, "BW")
     self.assertEqual(stats2.station, "ROTZ")
     self.assertEqual(stats2.other1.test1, "1")
     self.assertEqual(stats2.other1.__class__, AttribDict)
     self.assertEqual(len(stats2.other1), 1)
     self.assertEqual(stats2.other2.test2, "2")
     self.assertEqual(stats2.other2.__class__, AttribDict)
     self.assertEqual(len(stats2.other2), 1)
     self.assertEqual(stats2.other3, "test3")
     self.assertEqual(stats.network, "CZ")
     self.assertEqual(stats.station, "RJOB")
Example #2
    def create_trace(self, channel, stats, data):
        """Utility to create a new trace object.

        Parameters
        ----------
        channel : str
            channel name.
        stats : obspy.core.Stats
            channel metadata to clone.
        data : numpy.array
            channel data.

        Returns
        -------
        obspy.core.Trace
            trace containing data and metadata.
        """
        stats = Stats(stats)
        if self.data_type is None:
            stats.data_type = 'adjusted'
        else:
            stats.data_type = self.data_type
        if self.location is None:
            stats.location = 'A0'
        else:
            stats.location = self.location

        trace = super(AdjustedAlgorithm, self).create_trace(channel, stats,
            data)
        return trace
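A minimal, hedged sketch (not from the project above) of the Stats-cloning pattern this method relies on: copy an existing header with Stats(stats), override a few fields, and build a new Trace. Station, channel and timing values are placeholders.

import numpy as np
from obspy import Trace, UTCDateTime
from obspy.core import Stats

stats = Stats({
    "network": "NT", "station": "BOU", "channel": "H",   # placeholder ids
    "starttime": UTCDateTime("2020-01-01"), "delta": 60.0, "npts": 5,
})
clone = Stats(stats)      # independent copy of the header
clone.channel = "H_Dist"  # hypothetical derived channel name
trace = Trace(np.zeros(5), header=clone)
print(trace.id, trace.stats.delta)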
Example #3
 def test_deepcopy(self):
     """
     Tests deepcopy method of Stats object.
     """
     stats = Stats()
     stats.network = 'BW'
     stats['station'] = 'ROTZ'
     stats['other1'] = {'test1': '1'}
     stats['other2'] = AttribDict({'test2': '2'})
     stats['other3'] = 'test3'
     stats2 = copy.deepcopy(stats)
     stats.network = 'CZ'
     stats.station = 'RJOB'
     self.assertEqual(stats2.__class__, Stats)
     self.assertEqual(stats2.network, 'BW')
     self.assertEqual(stats2.station, 'ROTZ')
     self.assertEqual(stats2.other1.test1, '1')
     self.assertEqual(stats2.other1.__class__, AttribDict)
     self.assertEqual(len(stats2.other1), 1)
     self.assertEqual(stats2.other2.test2, '2')
     self.assertEqual(stats2.other2.__class__, AttribDict)
     self.assertEqual(len(stats2.other2), 1)
     self.assertEqual(stats2.other3, 'test3')
     self.assertEqual(stats.network, 'CZ')
     self.assertEqual(stats.station, 'RJOB')
Example #4
    def test_casted_stats_nscl_writes_to_mseed(self):
        """
        Ensure a Stream object that has had its nslc types cast to str can
        still be written.
        """
        st = Stream(traces=read()[0])

        # Get a new stats object with just the basic items in it
        stats_items = set(Stats())
        new_stats = Stats()
        new_stats.__dict__.update({x: st[0].stats[x] for x in stats_items})
        with warnings.catch_warnings(record=True):
            new_stats.network = 1
            new_stats.station = 1.1
        new_stats.channel = 'Non'
        st[0].stats = new_stats
        # try writing stream to bytes buffer
        bio = io.BytesIO()
        st.write(bio, 'mseed')
        bio.seek(0)
        # read bytes and compare
        stt = read(bio)
        # remove mseed-specific entries so streams compare equal
        stt[0].stats.pop('mseed')
        del stt[0].stats._format  # format gets added upon writing
        self.assertEqual(st, stt)
Example #5
def _create_trace(data, channel, starttime, delta=60.):
    stats = Stats()
    stats.channel = channel
    stats.delta = delta
    stats.starttime = starttime
    stats.npts = len(data)
    data = numpy.array(data, dtype=numpy.float64)
    return Trace(data, stats)
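A hedged usage sketch for the helper above; it assumes numpy, Stats and Trace are already imported in the same module, and the values passed in are purely illustrative.

from obspy import UTCDateTime

tr = _create_trace([1.0, 2.0, 3.0], "H", UTCDateTime("2019-12-01"))
print(tr.stats.channel, tr.stats.npts, tr.stats.delta)  # H 3 60.0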
Example #6
 def test_update(self):
     """
     Tests update method of Stats object.
     """
     x = Stats({"a": 5})
     self.assertTrue("a" in dir(x))
     x.update({"b": 5})
     self.assertTrue("b" in dir(x))
     y = {"a": 5}
     y.update({"b": 5})
     x = Stats(y)
     self.assertTrue("b" in dir(x))
Example #7
 def test_update(self):
     """
     Tests update method of Stats object.
     """
     x = Stats({'a': 5})
     self.assertTrue('a' in dir(x))
     x.update({'b': 5})
     self.assertTrue('b' in dir(x))
     y = {'a': 5}
     y.update({'b': 5})
     x = Stats(y)
     self.assertTrue('b' in dir(x))
Example #8
 def test_nestedStats(self):
     """
     Various setter and getter tests.
     """
     # 1
     stats = Stats()
     stats.test = dict()
     stats.test['test2'] = 'muh'
     self.assertEqual(stats.test.test2, 'muh')
     self.assertEqual(stats.test['test2'], 'muh')
     self.assertEqual(stats['test'].test2, 'muh')
     self.assertEqual(stats['test']['test2'], 'muh')
     stats.test['test2'] = 'maeh'
     self.assertEqual(stats.test.test2, 'maeh')
     self.assertEqual(stats.test['test2'], 'maeh')
     self.assertEqual(stats['test'].test2, 'maeh')
     self.assertEqual(stats['test']['test2'], 'maeh')
     # 2 - multiple initialization
     stats = Stats({'muh': 'meah'})
     stats2 = Stats(Stats(Stats(stats)))
     self.assertEqual(stats2.muh, 'meah')
     # 3 - check conversion to AttribDict
     stats = Stats()
     stats.sub1 = {'muh': 'meah'}
     stats.sub2 = AttribDict({'muh2': 'meah2'})
     stats2 = Stats(stats)
     self.assertTrue(isinstance(stats.sub1, AttribDict))
     self.assertTrue(isinstance(stats.sub2, AttribDict))
     self.assertEqual(stats2.sub1.muh, 'meah')
     self.assertEqual(stats2.sub2.muh2, 'meah2')
Example #9
def ascii(path, filenames):
    """ Reads SPECFEM3D-style ascii data
    """
    from numpy import loadtxt
    from obspy.core import Stream, Stats, Trace

    stream = Stream()
    for filename in filenames:
        stats = Stats()
        data = loadtxt(path +'/'+ filename)

        stats.filename = filename
        stats.starttime = data[0,0]
        stats.delta = data[1,0] - data[0,0]  # sample interval from time column
        stats.npts = len(data[:,0])

        try:
            parts = filename.split('.')
            stats.network = parts[0]
            stats.station = parts[1]
            stats.channel = parts[2]
        except:
            pass

        stream.append(Trace(data=data[:,1], header=stats))

    return stream
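A hedged call sketch for the reader above; the directory and file name are placeholders for actual SPECFEM3D ASCII output on disk.

st = ascii("OUTPUT_FILES", ["AA.S0001.BXZ.semd.ascii"])  # placeholder paths
print(st)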
Example #10
 def test_nestedStats(self):
     """
     Various setter and getter tests.
     """
     # 1
     stats = Stats()
     stats.test = dict()
     stats.test["test2"] = "muh"
     self.assertEqual(stats.test.test2, "muh")
     self.assertEqual(stats.test["test2"], "muh")
     self.assertEqual(stats["test"].test2, "muh")
     self.assertEqual(stats["test"]["test2"], "muh")
     stats.test["test2"] = "maeh"
     self.assertEqual(stats.test.test2, "maeh")
     self.assertEqual(stats.test["test2"], "maeh")
     self.assertEqual(stats["test"].test2, "maeh")
     self.assertEqual(stats["test"]["test2"], "maeh")
     # 2 - multiple initialization
     stats = Stats({"muh": "meah"})
     stats2 = Stats(Stats(Stats(stats)))
     self.assertEqual(stats2.muh, "meah")
     # 3 - check conversion to AttribDict
     stats = Stats()
     stats.sub1 = {"muh": "meah"}
     stats.sub2 = AttribDict({"muh2": "meah2"})
     stats2 = Stats(stats)
     self.assertTrue(isinstance(stats.sub1, AttribDict))
     self.assertTrue(isinstance(stats.sub2, AttribDict))
     self.assertEqual(stats2.sub1.muh, "meah")
     self.assertEqual(stats2.sub2.muh2, "meah2")
Example #11
 def test_setCalib(self):
     """
     Test to prevent setting a calibration factor of 0
     """
     x = Stats()
     # this should work
     x.update({'calib': 1.23})
     self.assertEqual(x.calib, 1.23)
     # this raises UserWarning
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('error', UserWarning)
         # 1
         self.assertRaises(UserWarning, x.__setitem__, 'calib', 0)
         # 2
         self.assertRaises(UserWarning, x.update, {'calib': 0})
     # calib value should nevertheless be set to 0
     self.assertTrue(x.calib, 0)
Example #12
 def test_simpleStats(self):
     """
     Various setter and getter tests.
     """
     stats = Stats()
     stats.test = 1
     self.assertEqual(stats.test, 1)
     self.assertEqual(stats['test'], 1)
     stats['test2'] = 2
     self.assertEqual(stats.test2, 2)
     self.assertEqual(stats['test2'], 2)
     stats['test'] = 2
     self.assertEqual(stats.test, 2)
     self.assertEqual(stats['test'], 2)
     stats.test2 = 1
     self.assertEqual(stats.test2, 1)
     self.assertEqual(stats['test2'], 1)
Example #13
 def test_pickleStats(self):
     """
     Test pickling Stats objects. Test case for issue #10.
     """
     stats = Stats()
     stats.muh = 1
     stats['maeh'] = 'hallo'
     # ASCII
     temp = pickle.dumps(stats, protocol=0)
     stats2 = pickle.loads(temp)
     self.assertEqual(stats, stats2)
     # old binary
     temp = pickle.dumps(stats, protocol=1)
     stats2 = pickle.loads(temp)
     self.assertEqual(stats, stats2)
     # new binary
     temp = pickle.dumps(stats, protocol=2)
     stats2 = pickle.loads(temp)
     self.assertEqual(stats, stats2)
Example #14
 def get_obspy_trace(self):
     """
     Return class contents as obspy.Trace object
     """
     stat = Stats()
     stat.network = self.net.split(b'\x00')[0].decode()
     stat.station = self.sta.split(b'\x00')[0].decode()
     location = self.loc.split(b'\x00')[0].decode()
     if location == '--':
         stat.location = ''
     else:
         stat.location = location
     stat.channel = self.chan.split(b'\x00')[0].decode()
     stat.starttime = UTCDateTime(self.start)
     stat.sampling_rate = self.rate
     stat.npts = len(self.data)
     return Trace(data=self.data, header=stat)
Example #15
def create_empty_trace(trace, channel):
    """
    Utility to create an empty trace, similar to another trace.

    Parameters
    ----------
    trace : obspy.core.Trace
        Trace that is source of most metadata, including array length.
    channel : str
        Channel name for created Trace.

    Returns
    -------
    obspy.core.Trace
        a Trace object, filled with numpy.nan.
    """
    stats = Stats(trace.stats)
    stats.channel = channel
    count = len(trace.data)
    numpy_data = numpy.full((count), numpy.nan)
    return Trace(numpy_data, stats)
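A short, hedged usage sketch for create_empty_trace; it assumes numpy, Stats and Trace are imported in the same module, and the channel code below is a placeholder.

from obspy import read

source = read()[0]                       # ObsPy example trace (3000 samples)
empty = create_empty_trace(source, "H")  # hypothetical channel code
print(empty.stats.channel, len(empty.data), empty.data[0])  # H 3000 nan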
Example #16
 def test_pickle_stats(self):
     """
     Test pickling Stats objects. Test case for issue #10.
     """
     stats = Stats()
     stats.muh = 1
     stats['maeh'] = 'hallo'
     # ASCII
     temp = pickle.dumps(stats, protocol=0)
     stats2 = pickle.loads(temp)
     self.assertEqual(stats, stats2)
     # old binary
     temp = pickle.dumps(stats, protocol=1)
     stats2 = pickle.loads(temp)
     self.assertEqual(stats, stats2)
     # new binary
     temp = pickle.dumps(stats, protocol=2)
     stats2 = pickle.loads(temp)
     self.assertEqual(stats, stats2)
     # SOH channels sampling_rate & delta == 0. for #1989
     stats.sampling_rate = 0
     pickle.loads(pickle.dumps(stats, protocol=0))
     pickle.loads(pickle.dumps(stats, protocol=1))
     pickle.loads(pickle.dumps(stats, protocol=2))
Example #17
 def test_init(self):
     """
     Init tests.
     """
     stats = Stats({'test': 'muh'})
     stats['other1'] = {'test1': '1'}
     stats['other2'] = AttribDict({'test2': '2'})
     stats['other3'] = 'test3'
     self.assertEqual(stats.test, 'muh')
     self.assertEqual(stats['test'], 'muh')
     self.assertEqual(stats.other1.test1, '1')
     self.assertEqual(stats.other1.__class__, AttribDict)
     self.assertEqual(len(stats.other1), 1)
     self.assertEqual(stats.other2.test2, '2')
     self.assertEqual(stats.other2.__class__, AttribDict)
     self.assertEqual(len(stats.other2), 1)
     self.assertEqual(stats.other3, 'test3')
     self.assertTrue('test' in stats)
     self.assertTrue('test' in stats.__dict__)
Example #18
def _make_stats(fi, tr_block_start, standard_orientation, details):
    """
    Make Stats object from information contained in the header of the trace.
    """
    base_scan_interval = _read(fi, 22, 1, 'binary')
    sampling_rate = int(1000 / (base_scan_interval / 16))
    # map sampling rate to band code according to seed standard
    band_map = {2000: 'G', 1000: 'G', 500: 'D', 250: 'D'}
    # geophone instrument code
    instrument_code = 'P'
    # mapping for "standard_orientation"
    standard_component_map = {'2': 'Z', '3': 'N', '4': 'E'}
    component = str(_read(fi, tr_block_start + 40, 1, 'binary'))
    if standard_orientation:
        component = standard_component_map[component]
    chan = band_map[sampling_rate] + instrument_code + component
    npts = _read(fi, tr_block_start + 27, 3, 'binary')
    start_time = _read(fi, tr_block_start + 20 + 2 * 32, 8, 'binary') / 1e6
    end_time = start_time + (npts - 1) * (1 / sampling_rate)
    network = _read(fi, tr_block_start + 20, 3, 'binary')
    station = _read(fi, tr_block_start + 23, 3, 'binary')
    location = _read(fi, tr_block_start + 26, 1, 'binary')
    statsdict = dict(starttime=UTCDateTime(start_time),
                     endtime=UTCDateTime(end_time),
                     sampling_rate=sampling_rate,
                     npts=npts,
                     network=str(network),
                     station=str(station),
                     location=str(location),
                     channel=chan)
    if details:
        statsdict['rg16'] = {}
        statsdict['rg16']['initial_headers'] = {}
        stats_initial_headers = statsdict['rg16']['initial_headers']
        stats_initial_headers.update(_read_initial_headers(fi))
        statsdict['rg16']['trace_headers'] = {}
        stats_tr_headers = statsdict['rg16']['trace_headers']
        stats_tr_headers.update(_read_trace_header(fi, tr_block_start))
        nbr_tr_header_block = _read(fi, tr_block_start + 9, 1, 'binary')
        if nbr_tr_header_block > 0:
            stats_tr_headers.update(
                _read_trace_headers(fi, tr_block_start, nbr_tr_header_block))
    return Stats(statsdict)
Example #19
 def test_compare_with_dict(self):
     """
     Checks if Stats is still comparable to a dict object.
     """
     adict = {
         'network': '',
         'sampling_rate': 1.0,
         'test': 1,
         'station': '',
         'location': '',
         'starttime': UTCDateTime(1970, 1, 1, 0, 0),
         'delta': 1.0,
         'calib': 1.0,
         'npts': 0,
         'endtime': UTCDateTime(1970, 1, 1, 0, 0),
         'channel': ''
     }
     ad = Stats(adict)
     self.assertEqual(ad, adict)
     self.assertEqual(adict, ad)
Example #20
    def load_npz(filename, metadata):
        """
        Loads previously computed PPSD results (from a
        compressed numpy binary in npz format, written with
        :meth:`~PPSD.write_npz`).
        Metadata have to be specified again during loading because they are not
        stored in the npz format.

        :type filename: str
        :param filename: Name of numpy .npz file with stored PPSD data
        :type metadata: :class:`~obspy.core.inventory.inventory.Inventory` or
            :class:`~obspy.io.xseed Parser` or str or dict
        :param metadata: Response information of instrument. See notes in
            :meth:`PPSD.__init__` for details.
        """
        data = np.load(filename)
        ppsd = PPSD(Stats(), metadata=metadata)
        for key in NPZ_STORE_KEYS:
            setattr(ppsd, key, data[key])
        return ppsd
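A hedged usage sketch for the loader above; the npz file name is a placeholder and the example inventory bundled with ObsPy stands in for real response metadata.

from obspy import read_inventory
from obspy.signal import PPSD

inv = read_inventory()  # example inventory shipped with ObsPy
ppsd = PPSD.load_npz("my_ppsd.npz", metadata=inv)  # hypothetical file name
print(len(ppsd.times_processed))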
Example #21
 def process_step(self, step, stream):
     """Filters stream for one step.
     Filters all traces in stream.
     Parameters
     ----------
     step : array element
         step holding variables for one filtering operation
     stream : obspy.core.Stream
         stream of data to filter
     Returns
     -------
     out : obspy.core.Stream
         stream containing 1 trace per original trace.
     """
     # gather variables from step
     input_sample_period = step["input_sample_period"]
     output_sample_period = step["output_sample_period"]
     window = np.array(step["window"])
     decimation = int(output_sample_period / input_sample_period)
     numtaps = len(window)
     window = window / sum(window)
     out = Stream()
     for trace in stream:
         starttime, data = self.align_trace(step, trace)
         # check that there is still enough data to filter
         if len(data) < numtaps:
             continue
         filtered = self.firfilter(data, window, decimation)
         stats = Stats(trace.stats)
         stats.delta = output_sample_period
         stats.data_interval = step["data_interval"]
         stats.data_interval_type = step["data_interval_type"]
         stats.filter_comments = step["filter_comments"]
         stats.starttime = starttime
         stats.npts = len(filtered)
         trace_out = self.create_trace(stats.channel, stats, filtered)
         out += trace_out
     return out
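For orientation, a hedged sketch of the kind of step dictionary process_step expects; the key names come from the code above, while the values are purely illustrative.

import numpy as np

step = {
    "input_sample_period": 1.0,      # seconds between input samples
    "output_sample_period": 60.0,    # seconds between output samples
    "window": np.ones(91),           # FIR taps; normalized inside process_step
    "data_interval": "minute",
    "data_interval_type": "filtered 1-minute",
    "filter_comments": ["illustrative filter comment"],
}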
Example #22
 def test_exception_reading_newer_npz(self):
     """
     Checks that an exception is properly raised when trying to read a npz
     that was written on a more recent ObsPy version (specifically that has
     a higher 'ppsd_version' number which is used to keep track of changes
     in PPSD and the npz file used for serialization).
     """
     msg = ("Trying to read/add a PPSD npz with 'ppsd_version=100'. This "
            "file was written on a more recent ObsPy version that very "
            "likely has incompatible changes in PPSD internal structure "
            "and npz serialization. It can not safely be read with this "
            "ObsPy version (current 'ppsd_version' is {!s}). Please "
            "consider updating your ObsPy installation.".format(
                PPSD(stats=Stats(), metadata=None).ppsd_version))
     # 1 - loading a npz
     data = np.load(self.example_ppsd_npz)
     # we have to load, modify 'ppsd_version' and save the npz file for the
     # test..
     items = {key: data[key] for key in data.files}
     # deliberately set a higher ppsd_version number
     items['ppsd_version'] = items['ppsd_version'].copy()
     items['ppsd_version'].fill(100)
     with NamedTemporaryFile() as tf:
         filename = tf.name
         with open(filename, 'wb') as fh:
             np.savez(fh, **items)
         with self.assertRaises(ObsPyException) as e:
             PPSD.load_npz(filename)
     self.assertEqual(str(e.exception), msg)
     # 2 - adding a npz
     ppsd = PPSD.load_npz(self.example_ppsd_npz)
     for method in (ppsd.add_npz, ppsd._add_npz):
         with NamedTemporaryFile() as tf:
             filename = tf.name
             with open(filename, 'wb') as fh:
                 np.savez(fh, **items)
             with self.assertRaises(ObsPyException) as e:
                 method(filename)
             self.assertEqual(str(e.exception), msg)
Example #23
def _make_stats(theader, gheader, standard_orientation):
    """
    Make Stats object from information from several blocks.
    """
    sampling_rate = int(1000. / (gheader['base_scan'] / 16.))

    # get channel code
    component = str(theader['channel_code'])
    if standard_orientation:
        component = STANDARD_COMPONENT_MAP[component]
    chan = BAND_MAP[sampling_rate] + INSTRUMENT_CODE + component

    statsdict = dict(
        starttime=UTCDateTime(theader['time'] / 1000000.),
        sampling_rate=sampling_rate,
        npts=theader['samples'],
        network=str(theader['line_number']),
        station=str(theader['point']),
        location=str(theader['index']),
        channel=chan,
    )
    return Stats(statsdict)
Example #24
def map_blocks_header(whole_list_reformat, data_block_headers):
    network = "CWBSN"
    station_id = str(data_block_headers["stationid"])
    event_time = data_block_headers["starttime"]
    channel = data_block_headers["channel"]
    sampling_rate = data_block_headers["sampling_rate"]
    starttime = data_block_headers["starttime"]
    instrument_type = data_block_headers["instrument_type"]
    list_of_single_station = list(filter(lambda staid: staid['stationid'] == station_id, whole_list_reformat))
    list_lt_evt_time = [element for element in list_of_single_station if
                        element['stet'] > event_time and element['stbt'] <= event_time ]
    if len(list_lt_evt_time) != 1:
        raise AttributeError("station operational date ambiguity")
    else:
        station = list_lt_evt_time[0]["station"]
        stla = list_lt_evt_time[0]["stla"]
        stlo = list_lt_evt_time[0]["stlo"]
        stel = list_lt_evt_time[0]["stel"]
        trace_stats = Stats(header={"station": station, "sampling_rate": sampling_rate,
                                    "starttime": starttime, "channel": channel, "network": network,
                                    "stla": stla, "stlo": stlo, "stel": stel, "instrument_type": instrument_type})
    return trace_stats
Example #25
def read_specfem_seismogram(output_files, network, station, band):
    st = Stream()
    for component in 'ZNE':
        channel = '%sX%s' % (band, component)
        filename = os.path.join(output_files,
                                '%s.%s.%s.sem.ascii' % (network, station,
                                                        channel))
        tmp = np.genfromtxt(filename)

        stats = Stats()
        stats.network = network
        stats.station = station
        stats.channel = channel
        stats.delta = tmp[1, 0] - tmp[0, 0]
        stats.npts = tmp.shape[0]
        stats.starttime = tmp[0, 0]

        tr = Trace(tmp[:, 1], stats)
        st += tr

    return st
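A hedged call sketch for the reader above; directory, network, station and band code are placeholders for whatever a SPECFEM run actually produced.

st = read_specfem_seismogram("OUTPUT_FILES", "AA", "S0001", "M")  # placeholders
print(st)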
Example #26
def test_raw_input_client():
    """edge_test.RawInputClient_test.test_raw_input_client()
    """
    network = "NT"
    station = "BOU"
    channel = "MVH"
    location = "R0"
    data = [0, 1, 2, 3, 4, 5]
    starttime = UTCDateTime("2019-12-01")

    trace = Trace(
        numpy.array(data, dtype=numpy.float64),
        Stats({
            "channel": channel,
            "delta": 60.0,
            "location": location,
            "network": network,
            "npts": len(data),
            "starttime": starttime,
            "station": station,
        }),
    )

    client = MockRawInputClient(
        tag="tag",
        host="host",
        port="port",
        station=station,
        channel=channel,
        location=location,
        network=network,
    )
    trace_send = EdgeFactory()._convert_trace_to_int(trace.copy())
    client.send_trace("minute", trace_send)
    # verify data was sent
    assert_equal(len(client.last_send), 1)
Example #27
 def test_nestedStats(self):
     """
     Various setter and getter tests.
     """
     #1
     stats = Stats()
     stats.test = dict()
     stats.test['test2'] = 'muh'
     self.assertEqual(stats.test.test2, 'muh')
     self.assertEqual(stats.test['test2'], 'muh')
     self.assertEqual(stats['test'].test2, 'muh')
     self.assertEqual(stats['test']['test2'], 'muh')
     stats.test['test2'] = 'maeh'
     self.assertEqual(stats.test.test2, 'maeh')
     self.assertEqual(stats.test['test2'], 'maeh')
     self.assertEqual(stats['test'].test2, 'maeh')
     self.assertEqual(stats['test']['test2'], 'maeh')
     #2 - multiple initialization
     stats = Stats({'muh': 'meah'})
     stats2 = Stats(Stats(Stats(stats)))
     self.assertEqual(stats2.muh, 'meah')
     #3 - check conversion to AttribDict
     stats = Stats()
     stats.sub1 = {'muh': 'meah'}
     stats.sub2 = AttribDict({'muh2': 'meah2'})
     stats2 = Stats(stats)
     self.assertTrue(isinstance(stats.sub1, AttribDict))
     self.assertTrue(isinstance(stats.sub2, AttribDict))
     self.assertEqual(stats2.sub1.muh, 'meah')
     self.assertEqual(stats2.sub2.muh2, 'meah2')
Example #28
def readSEISAN(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a SEISAN file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEISAN file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/2001-01-13-1742-24S.KONO__004")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    4 Trace(s) in Stream:
    .KONO.0.B0Z | 2001-01-13T17:45:01.999000Z - ... | 20.0 Hz, 6000 samples
    .KONO.0.L0Z | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0N | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0E | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    """
    def _readline(fh, length=80):
        data = fh.read(length + 8)
        end = length + 4
        start = 4
        return data[start:end]

    # read data chunk from given file
    fh = open(filename, 'rb')
    data = fh.read(80 * 12)
    # get version info from file
    (byteorder, arch, _version) = _getVersion(data)
    # fetch lines
    fh.seek(0)
    # start with event file header
    # line 1
    data = _readline(fh)
    number_of_channels = int(data[30:33])
    # calculate number of lines with channels
    number_of_lines = number_of_channels // 3 + (number_of_channels % 3 and 1)
    if number_of_lines < 10:
        number_of_lines = 10
    # line 2
    data = _readline(fh)
    # line 3
    for _i in xrange(0, number_of_lines):
        data = _readline(fh)
    # now parse each event file channel header + data
    stream = Stream()
    dlen = arch / 8
    dtype = byteorder + 'i' + str(dlen)
    stype = '=i' + str(dlen)
    for _i in xrange(number_of_channels):
        # get channel header
        temp = _readline(fh, 1040)
        # create Stats
        header = Stats()
        header['network'] = (temp[16] + temp[19]).strip()
        header['station'] = temp[0:5].strip()
        header['location'] = (temp[7] + temp[12]).strip()
        header['channel'] = (temp[5:7] + temp[8]).strip()
        header['sampling_rate'] = float(temp[36:43])
        header['npts'] = int(temp[43:50])
        # create start and end times
        year = int(temp[9:12]) + 1900
        month = int(temp[17:19])
        day = int(temp[20:22])
        hour = int(temp[23:25])
        mins = int(temp[26:28])
        secs = float(temp[29:35])
        header['starttime'] = UTCDateTime(year, month, day, hour, mins) + secs
        if headonly:
            # skip data
            fh.seek(dlen * (header['npts'] + 2), 1)
            stream.append(Trace(header=header))
        else:
            # fetch data
            data = np.fromfile(fh, dtype=dtype, count=header['npts'] + 2)
            # convert to system byte order
            data = np.require(data, stype)
            stream.append(Trace(data=data[2:], header=header))
    return stream
Example #29
def moveout_test(PSS_file, q, phase):
    """
    Creates synthetic PRFs and stacks them after depth migration.

    Parameters
    ----------
    PSS_file : str
        Filename of raysum file containing P-Sv-Sh traces.
    q : float
        Slowness [s/m].
    phase : str
        Either "P" for Ps or "S" for Sp.

    Returns
    -------
    z : np.array
        Depth vector.
    stack : np.array
        Receiver function stack.
    RF_mo : np.array
        Matrix containing all depth migrated RFs.
    RF : np.array
        Matrix containing all RFs.
    dt : float
        Sampling interval.
    PSS : np.array
        Matrix containing all traces in P-Sv-Sh.

    """
    rayp = q * 1.111949e5
    PSS, dt, M, N, shift = read_raysum(phase, PSS_file=PSS_file)

    # Create receiver functions
    RF = []
    RF_mo = []
    stats = Stats()
    stats.npts = N
    stats.delta = dt
    stats.starttime = UTCDateTime(0)

    for i in range(M):
        if phase == "P":
            data, _, IR = it(PSS[i, 0, :],
                             PSS[i, 1, :],
                             dt,
                             shift=shift,
                             width=4)
        elif phase == "S":
            data, _, _ = it(PSS[i, 1, :],
                            PSS[i, 0, :],
                            dt,
                            shift=shift,
                            width=4)
        RF.append(data)
        z, rfc = moveout(data,
                         stats,
                         UTCDateTime(shift),
                         rayp[i],
                         phase,
                         fname="raysum.dat")
        RF_mo.append(rfc)
    stack = np.average(RF_mo, axis=0)
    plt.close('all')
    plt.figure()
    for mo in RF_mo:
        plt.plot(z, mo)
    return z, stack, RF_mo, RF, dt, PSS
Example #30
def rf_test(phase,
            dip,
            rfloc='output/waveforms/RF',
            geom_file='3D.geom',
            decon_meth='it'):
    """
    Creates synthetic PRFs from Raysum data.

    Parameters
    ----------
    phase : string
        "P" or "S".
    dip : int
        Dip of the LAB in degrees; determines which files to use.
    rfloc : str, optional
        Parent directory in which the RFs are saved.
    geom_file : str, optional
        Filename of the geometry file.

    Returns
    -------
    rfs: list
        List of RFTrace objects. Will in addition be saved in SAC format.

    """
    # Determine filenames
    PSS_file = []
    for i in range(16):
        PSS_file.append('3D_' + str(dip) + '_' + str(i) + '.tr')

    # Read geometry
    baz, q, dN, dE = read_geom(geom_file, phase)

    # statlat = dN/(DEG2KM*1000)
    d = np.sqrt(np.square(dN) + np.square(dE))
    az = np.rad2deg(np.arccos(dN / d))
    i = np.where(dE < 0)
    az[i] = az[i] + 180
    statlat = []
    statlon = []
    for azimuth, delta in zip(az, d):
        if delta == 0:
            statlat.append(0)
            statlon.append(0)
            continue
        coords = Geodesic.WGS84.Direct(0, 0, azimuth, delta)
        statlat.append(coords["lat2"])
        statlon.append(coords["lon2"])
    #         for n, longitude in enumerate(lon):
#             y, _, _ = gps2dist_azimuth(latitude, 0, latitude, longitude)
# statlon = dE/(DEG2KM*1000)
    rayp = q * DEG2KM * 1000

    # Read traces
    stream = []

    for f in PSS_file:
        PSS, dt, _, N, shift = read_raysum(phase, PSS_file=f)
        stream.append(PSS)

    streams = np.vstack(stream)
    del stream

    M = len(baz)

    if M != streams.shape[0]:
        raise ValueError([
            "Number of traces", streams.shape[0], """does not
             equal the number of backazimuths in the geom file""", M
        ])

    rfs = []
    odir = os.path.join(rfloc, phase, 'raysum', str(dip))
    ch = ['BHP', 'BHV', 'BHH']  # Channel names

    os.makedirs(odir, exist_ok=True)

    # Create RF objects
    for i, st in enumerate(streams):
        s = Stream()
        for j, tr in enumerate(st):
            stats = Stats()
            stats.npts = N
            stats.delta = dt
            stats.starttime = UTCDateTime(0)
            stats.channel = ch[j]
            stats.network = 'RS'
            stats.station = str(dip)
            s.append(Trace(data=tr, header=stats))

        # Create info dictionary for rf creation
        info = {
            'onset': [UTCDateTime(0) + shift],
            'starttime': [UTCDateTime(0)],
            'statlat': statlat[i],
            'statlon': statlon[i],
            'statel': 0,
            'rayp_s_deg': [rayp[i]],
            'rbaz': [baz[i]],
            'rdelta': [np.nan],
            'ot_ret': [0],
            'magnitude': [np.nan],
            'evt_depth': [np.nan],
            'evtlon': [np.nan],
            'evtlat': [np.nan]
        }

        rf = createRF(s, phase=phase, method=decon_meth, info=info)

        # Write RF
        rf.write(os.path.join(odir, str(i) + '.sac'), format='SAC')
        rfs.append(rf)

    return rfs, statlat, statlon
Example #31
    def test_ppsd_restricted_stacks(self, state, image_path):
        """
        Test PPSD.calculate_histogram() with restrictions to what data should
        be stacked. Also includes image tests.
        """
        # set up a bogus PPSD, with fixed random psds but with real start times
        # of psd pieces, to facilitate testing the stack selection.
        ppsd = PPSD(stats=Stats(dict(sampling_rate=150)),
                    metadata=None,
                    db_bins=(-200, -50, 20.),
                    period_step_octaves=1.4)
        # change data to nowadays used nanoseconds POSIX timestamp
        ppsd._times_processed = [
            UTCDateTime(t)._ns for t in np.load(
                os.path.join(state.path, "ppsd_times_processed.npy")).tolist()
        ]
        np.random.seed(1234)
        ppsd._binned_psds = [
            arr for arr in np.random.uniform(-200, -50, (
                len(ppsd._times_processed), len(ppsd.period_bin_centers)))
        ]

        # Test callback function that selects a fixed random set of the
        # timestamps.  Also checks that we get passed the type we expect,
        # which is 1D numpy ndarray of int type.
        def callback(t_array):
            assert isinstance(t_array, np.ndarray)
            assert t_array.shape == (len(ppsd._times_processed), )
            assert np.issubdtype(t_array.dtype, np.integer)
            np.random.seed(1234)
            res = np.random.randint(0, 2, len(t_array)).astype(bool)
            return res

        # test several different sets of stack criteria, should cover
        # everything, even with lots of combined criteria
        stack_criteria_list = [
            dict(starttime=UTCDateTime(2015, 3, 8), month=[2, 3, 5, 7, 8]),
            dict(endtime=UTCDateTime(2015, 6, 7),
                 year=[2015],
                 time_of_weekday=[(1, 0, 24), (2, 0, 24), (-1, 0, 11)]),
            dict(year=[2013, 2014, 2016, 2017], month=[2, 3, 4]),
            dict(month=[1, 2, 5, 6, 8], year=2015),
            dict(isoweek=[4, 5, 6, 13, 22, 23, 24, 44, 45]),
            dict(time_of_weekday=[(5, 22, 24), (6, 0, 2), (6, 22, 24)]),
            dict(callback=callback, month=[1, 3, 5, 7]),
            dict(callback=callback)
        ]
        expected_selections = np.load(
            os.path.join(state.path, "ppsd_stack_selections.npy"))

        # test every set of criteria
        for stack_criteria, expected_selection in zip(stack_criteria_list,
                                                      expected_selections):
            selection_got = ppsd._stack_selection(**stack_criteria)
            np.testing.assert_array_equal(selection_got, expected_selection)

        plot_kwargs = dict(max_percentage=15,
                           xaxis_frequency=True,
                           period_lim=(0.01, 50))
        ppsd.calculate_histogram(**stack_criteria_list[1])
        fig = ppsd.plot(show=False, **plot_kwargs)

        fig.axes[1].set_xlim(left=fig.axes[1].get_xlim()[0] - 2)
        image_path_1 = image_path.parent / 'test_ppsd_restricted_stacks_1.png'
        with np.errstate(under='ignore'):
            fig.savefig(image_path_1)

        # test it again, checking that updating an existing plot with different
        # stack selection works..
        #  a) we start with the stack for the expected image and test that it
        #     matches (like above):
        ppsd.calculate_histogram(**stack_criteria_list[1])
        image_path_2 = image_path.parent / 'test_ppsd_restricted_stacks_2.png'
        with np.errstate(under='ignore'):
            fig.savefig(image_path_2)

        ppsd.calculate_histogram(**stack_criteria_list[1])
        image_path_3 = image_path.parent / 'test_ppsd_restricted_stacks_3.png'
        ppsd._plot_histogram(fig=fig, draw=True)
        with np.errstate(under='ignore'):
            fig.savefig(image_path_3)
Example #32
 def test_component(self):
     """
     Test setting and getting of component.
     """
     stats = Stats()
     # Channel with 3 characters
     stats.channel = 'HHZ'
     self.assertEqual(stats.component, 'Z')
     stats.component = 'L'
     self.assertEqual(stats.component, 'L')
     self.assertEqual(stats.channel, 'HHL')
     stats['component'] = 'Q'
     self.assertEqual(stats['component'], 'Q')
     self.assertEqual(stats.channel, 'HHQ')
     # Channel with 1 character as component
     stats.channel = 'N'
     stats.component = 'E'
     self.assertEqual(stats.channel, 'E')
     self.assertEqual(stats.component, 'E')
     # Channel with 0 characters
     stats.channel = ''
     self.assertEqual(stats.component, '')
     stats.component = 'Z'
     self.assertEqual(stats.channel, 'Z')
     # Components must be single character
     stats.channel = 'HHZ'
     with self.assertRaises(ValueError):
         stats.component = ''
     self.assertEqual(stats.channel, 'HHZ')
     with self.assertRaises(ValueError):
         stats.component = 'ZZ'
     self.assertEqual(stats.channel, 'HHZ')
Example #33
def _read_asc(filename,
              headonly=False,
              skip=0,
              delta=None,
              length=None,
              **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler ASCII file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type skip: int, optional
    :param skip: Number of lines to be skipped from top of file. If defined
        only one trace is read from file.
    :type delta: float, optional
    :param delta: If ``skip`` is used, ``delta`` defines sample offset in
        seconds.
    :type length: int, optional
    :param length: If ``skip`` is used, ``length`` defines the number of values
        to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/QFILE-TEST-ASC.ASC")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    fh = open(filename, 'rt')
    # read file and split text into channels
    channels = []
    headers = {}
    data = io.StringIO()
    for line in fh.readlines()[skip:]:
        if line.isspace():
            # blank line
            # check if any data fetched yet
            if len(headers) == 0 and data.tell() == 0:
                continue
            # append current channel
            data.seek(0)
            channels.append((headers, data))
            # create new channel
            headers = {}
            data = io.StringIO()
            if skip:
                # if skip is set only one trace is read, everything else makes
                # no sense.
                break
            continue
        elif line[0].isalpha():
            # header entry
            key, value = line.split(':', 1)
            key = key.strip()
            value = value.strip()
            headers[key] = value
        elif not headonly:
            # data entry - may be written in multiple columns
            data.write(line.strip() + ' ')
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    # custom header
    custom_header = {}
    if delta:
        custom_header["delta"] = delta
    if length:
        custom_header["npts"] = length

    for headers, data in channels:
        # create Stats
        header = Stats(custom_header)
        header['sh'] = {}
        channel = [' ', ' ', ' ']
        # generate headers
        for key, value in headers.items():
            if key == 'DELTA':
                header['delta'] = float(value)
            elif key == 'LENGTH':
                header['npts'] = int(value)
            elif key == 'CALIB':
                header['calib'] = float(value)
            elif key == 'STATION':
                header['station'] = value
            elif key == 'COMP':
                channel[2] = value[0]
            elif key == 'CHAN1':
                channel[0] = value[0]
            elif key == 'CHAN2':
                channel[1] = value[0]
            elif key == 'START':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = to_utcdatetime(value)
            else:
                # everything else gets stored into sh entry
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            # read data
            data = loadtxt(data, dtype=np.float32, ndmin=1)

            # cut data if requested
            if skip and length:
                data = data[:length]

            # use correct value in any case
            header["npts"] = len(data)

            stream.append(Trace(data=data, header=header))
    return stream
Example #34
    def append(self, trace, gap_overlap_check=False, verbose=False):
        """
        Appends a Trace object to this RtTrace.

        Registered real-time processing will be applied to copy of appended
        Trace object before it is appended.  This RtTrace will be truncated
        from the beginning to RtTrace.max_length, if specified.
        Sampling rate, data type and trace.id of both traces must match.

        :type trace: :class:`~obspy.core.trace.Trace`
        :param trace:  :class:`~obspy.core.trace.Trace` object to append to
            this RtTrace
        :type gap_overlap_check: bool, optional
        :param gap_overlap_check: Action to take when there is a gap or overlap
            between the end of this RtTrace and start of appended Trace:
                If True, raise TypeError.
                If False, all trace processing memory will be re-initialized to
                    prevent false signal in processed trace.
            (default is ``False``).
        :type verbose: bool, optional
        :param verbose: Print additional information to stdout
        :return: NumPy :class:`np.ndarray` object containing processed trace
            data from appended Trace object.
        """
        if not isinstance(trace, Trace):
            # only add Trace objects
            raise TypeError("Only obspy.core.trace.Trace objects are allowed")

        # sanity checks
        if self.have_appended_data:
            #  check id
            if self.getId() != trace.getId():
                raise TypeError("Trace ID differs:", self.getId(),
                                trace.getId())
            #  check sample rate
            if self.stats.sampling_rate != trace.stats.sampling_rate:
                raise TypeError("Sampling rate differs:",
                                self.stats.sampling_rate,
                                trace.stats.sampling_rate)
            #  check calibration factor
            if self.stats.calib != trace.stats.calib:
                raise TypeError("Calibration factor differs:",
                                self.stats.calib, trace.stats.calib)
            # check data type
            if self.data.dtype != trace.data.dtype:
                raise TypeError("Data type differs:", self.data.dtype,
                                trace.data.dtype)
        # TODO: IMPORTANT? Should improve check for gaps and overlaps
        # and handle more elegantly
        # check times
        gap_or_overlap = False
        if self.have_appended_data:
            #delta = int(math.floor(\
            #    round((rt.stats.starttime - lt.stats.endtime) * sr, 5) )) - 1
            diff = trace.stats.starttime - self.stats.endtime
            delta = diff * self.stats.sampling_rate - 1.0
            if verbose:
                msg = "%s: Overlap/gap of (%g) samples in data: (%s) (%s) " + \
                    "diff=%gs  dt=%gs"
                print(msg % (self.__class__.__name__, delta,
                             self.stats.endtime, trace.stats.starttime, diff,
                             self.stats.delta))
            if delta < -0.1:
                msg = "Overlap of (%g) samples in data: (%s) (%s) diff=%gs" + \
                    "  dt=%gs"
                msg = msg % (-delta, self.stats.endtime, trace.stats.starttime,
                             diff, self.stats.delta)
                if gap_overlap_check:
                    raise TypeError(msg)
                gap_or_overlap = True
            if delta > 0.1:
                msg = "Gap of (%g) samples in data: (%s) (%s) diff=%gs" + \
                    "  dt=%gs"
                msg = msg % (delta, self.stats.endtime, trace.stats.starttime,
                             diff, self.stats.delta)
                if gap_overlap_check:
                    raise TypeError(msg)
                gap_or_overlap = True
            if gap_or_overlap:
                msg += " - Trace processing memory will be re-initialized."
                warnings.warn(msg, UserWarning)
            else:
                # correct start time to pin absolute trace timing to start of
                # appended trace, this prevents slow drift of nominal trace
                # timing from absolute time when nominal sample rate differs
                # from true sample rate
                self.stats.starttime = \
                    self.stats.starttime + diff - self.stats.delta
                if verbose:
                    print "%s: self.stats.starttime adjusted by: %gs" \
                        % (self.__class__.__name__, diff - self.stats.delta)
        # first apply all registered processing to Trace
        for proc in self.processing:
            process_name, options, rtmemory_list = proc
            # if gap or overlap, clear memory
            if gap_or_overlap and rtmemory_list is not None:
                for n in range(len(rtmemory_list)):
                    rtmemory_list[n] = RtMemory()
            # apply processing
            trace = trace.copy()
            dtype = trace.data.dtype
            if hasattr(process_name, '__call__'):
                # check if direct function call
                trace.data = process_name(trace.data, **options)
            else:
                # got predefined function
                func = REALTIME_PROCESS_FUNCTIONS[process_name.lower()][0]
                options['rtmemory_list'] = rtmemory_list
                trace.data = func(trace, **options)
            # assure dtype is not changed
            trace.data = np.require(trace.data, dtype=dtype)
        # if first data, set stats
        if not self.have_appended_data:
            self.data = np.array(trace.data)
            self.stats = Stats(header=trace.stats)
            self.have_appended_data = True
            return trace
        # handle all following data sets
        # fix Trace.__add__ parameters
        # TODO: IMPORTANT? Should check for gaps and overlaps and handle
        # more elegantly
        sum_trace = Trace.__add__(self,
                                  trace,
                                  method=0,
                                  interpolation_samples=0,
                                  fill_value='latest',
                                  sanity_checks=True)
        # Trace.__add__ returns new Trace, so update to this RtTrace
        self.data = sum_trace.data
        # left trim if data length exceeds max_length
        if self.max_length is not None:
            max_samples = int(self.max_length * self.stats.sampling_rate + 0.5)
            if np.size(self.data) > max_samples:
                starttime = self.stats.starttime + (np.size(self.data) - \
                    max_samples) / self.stats.sampling_rate
                self._ltrim(starttime,
                            pad=False,
                            nearest_sample=True,
                            fill_value=None)
        return trace
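As a hedged illustration of how append is typically driven (ObsPy's bundled example data and the documented RtTrace API; the 60 s max_length and the np.abs processing step are arbitrary choices, not part of the snippet above):

import numpy as np
from obspy import read
from obspy.realtime import RtTrace

tr = read()[0]                  # example trace, 100 Hz
rt = RtTrace(max_length=60)     # keep at most 60 s of data
rt.register_rt_process(np.abs)  # direct function call as processing step
t0 = tr.stats.starttime
for chunk in (tr.slice(t0, t0 + 10), tr.slice(t0 + 10.01, tr.stats.endtime)):
    rt.append(chunk, gap_overlap_check=False)
print(rt.stats.npts, rt.stats.sampling_rate)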
Example #35
def raw_import(gzip_filename):
    """
    Makes a 'raw' stream file from the gzipped csv file.
    The csv file has been downloaded from the JAXA website.
    The method makes a raw stream which does not yet have the frames
    reconstructed.

    :type gzip_filename: str
    :param gzip_filename: gzipped filename of the CSV file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    """

    # read the gzipped csv file
    with gzip.open(gzip_filename, 'rt') as fh:
        # read file
        buf = []
        header = next(fh).split(',')

        # read the header
        # it should contain either 1 channel or 3
        if len(header) == 8:
            # the RESP files use either 'MH1', 'MH2', 'MHZ'
            # the JAXA files use 'LPX', 'LPY', 'LPZ'
            # X should point north, Y east, but this is not always the case
            # so we rename LPX to MH1, and LPY to MH2
            channels = ['MH1', 'MH2', 'MHZ']
            raw_channels = ['_M1', '_M2', '_MZ']
            for line in fh:
                temp = line.split(',')

                try:
                    temp[4] = UTCDateTime(temp[4])
                except ValueError as e:
                    # this is a specific error which is found in the csv file
                    if temp[4] == '1975-49-11 19:13:04.232000':
                        temp[4] = UTCDateTime('1975-09-11 19:13:04.232000')
                    else:
                        raise

                try:
                    temp[0] = int(temp[0])
                except ValueError as e:
                    # this is a specific error which is found in the csv file
                    if temp[4] == UTCDateTime(
                            '1975-09-15 12:53:36.849000') and temp[0] == '<3':
                        temp[0] = 83
                    else:
                        raise

                buf.append(
                    (temp[1], temp[2], temp[4], int(temp[0]), int(temp[3]),
                     int(temp[5]), int(temp[6]), int(temp[7])))

        elif len(header) == 6:
            channels = ['SPZ']
            raw_channels = ['_SZ']
            for line in fh:
                # check the manual list of points which have been removed
                if line in remove_manually:
                    continue

                temp = line.split(',')
                # the original order:
                # frame_count, ap_station, ground_station, nc, time, spz
                # make a tuple (in a new order so that it can be sorted):
                # ap_station, ground_station, time, frame_count, nc, spz
                buf.append(
                    (temp[1], temp[2], UTCDateTime(temp[4]), int(temp[0]),
                     int(temp[3]), int(temp[5])))

    # sort by ap_station, ground_station and time (and also everything else,
    # but that won't matter)
    buf.sort()

    stream = Stream()
    data_x = []
    data_y = []
    data_z = []
    data_sz = []
    abs_times = []
    frame_count_ncs = []
    corr_frame_count_ncs = []

    stats = Stats()
    stats.delta = DELTA
    network = 'XA'
    last_id = None

    for data in buf:

        # read in the data from the buffer
        station = data[0].rjust(3, 'S')
        ground_station = data[1].rjust(2, '0')
        time = data[2]

        frame_count = data[3]
        nc = data[4]
        # create a combination of frame count and nc - from 0.0 to 89.75
        frame_count_nc = float(frame_count) + (float(nc) - 1.) * 0.25

        id = "{0:s}.{1:s}.{2:s}.{3:s}".format(network, station, ground_station,
                                              channels[0])

        # check whether we are adding to an existing one, or creating a new one
        if (last_id is None or last_id != id):
            # before creating the new one, add previous trace(s) to the stream
            if len(abs_times) > 0:
                _make_traces(stream=stream,
                             stats=stats,
                             header=header,
                             channels=raw_channels,
                             data_x=data_x,
                             data_y=data_y,
                             data_z=data_z,
                             data_sz=data_sz,
                             abs_times=abs_times,
                             frame_count_ncs=frame_count_ncs)

            data_x = []
            data_y = []
            data_z = []
            data_sz = []
            abs_times = []
            frame_count_ncs = []

            stats = Stats()
            stats.delta = DELTA
            stats.starttime = time
            stats.network = network
            stats.station = station
            stats.location = ground_station

        # add the data from any line
        if len(header) == 8:
            data_x.append(data[5])
            data_y.append(data[6])
            data_z.append(data[7])
        else:
            data_sz.append(data[5])
        abs_times.append(time.timestamp)
        frame_count_ncs.append(frame_count_nc)

        last_id = id

    # add the last one
    if len(abs_times) > 0:
        _make_traces(stream=stream,
                     stats=stats,
                     header=header,
                     channels=raw_channels,
                     data_x=data_x,
                     data_y=data_y,
                     data_z=data_z,
                     data_sz=data_sz,
                     abs_times=abs_times,
                     frame_count_ncs=frame_count_ncs)

    return stream
Example #36
    origin_time = UTCDateTime(
        year=2009,
        month=4,
        day=7,
        hour=20,
        minute=12,
        second=55,
    )

    station = Stats({
        'longitude': -149.8174,
        'latitude': 61.5919,
        'starttime': origin_time - 100.,
        'endtime': origin_time - 100.,
        'npts': 19999,
        'delta': 0.02,
        'station': 'BIGB',
        'location': '',
        'id': 'YV.BIGB',
        'catalog_origin_time': origin_time,
        'catalog_depth': 33033.5998535,
        'catalog_distance': 15.8500907298,
        'catalog_azimuth': 345.527768889,
    })

    origin = Origin(
        time=origin_time,
        latitude=61.4542007446,
        longitude=-149.742797852,
        depth=33033.5998535,
    )
Example #37
        return None
    return x


headers = {}
data = StringIO()

# read file
fh = open(filename_in, 'rt')
for i in range(64):
    key, value = fh.readline().strip().split(':', 1)
    headers[key.strip()] = value.strip()

# create ObsPy stream object
stream = Stream()
header = Stats()
header['dyna'] = {}

header['network'] = headers['NETWORK']
header['station'] = headers['STATION_CODE']
header['location'] = headers['LOCATION']
header['channel'] = headers['STREAM']
try:
    header['starttime'] = toUTCDateTime(
        headers['DATE_TIME_FIRST_SAMPLE_YYYYMMDD_HHMMSS']
    )  # use toUTCDateTime to convert from DYNA format
except:
    header['starttime'] = toUTCDateTime('19700101_000000')
header['sampling_rate'] = 1 / float(headers['SAMPLING_INTERVAL_S'])
header['delta'] = float(headers['SAMPLING_INTERVAL_S'])
header['npts'] = int(headers['NDATA'])
Example #38
def xcorr2(tr1,
           tr2,
           sta1_inv=None,
           sta2_inv=None,
           instrument_response_output='vel',
           water_level=50.,
           window_seconds=3600,
           window_overlap=0.1,
           window_buffer_length=0,
           interval_seconds=86400,
           taper_length=0.05,
           resample_rate=None,
           flo=None,
           fhi=None,
           clip_to_2std=False,
           whitening=False,
           whitening_window_frequency=0,
           one_bit_normalize=False,
           envelope_normalize=False,
           verbose=1,
           logger=None):

    # Length of window_buffer in seconds
    window_buffer_seconds = window_buffer_length * window_seconds
    adjusted_taper_length = taper_length
    if (window_buffer_seconds):
        # adjust taper length
        adjusted_taper_length = taper_length / (1. + window_buffer_length * 2.)
    # end if

    sr1 = tr1.stats.sampling_rate
    sr2 = tr2.stats.sampling_rate
    sr1_orig = sr1
    sr2_orig = sr2
    tr1_d_all = tr1.data  # refstn
    tr2_d_all = tr2.data
    lentr1_all = tr1_d_all.shape[0]
    lentr2_all = tr2_d_all.shape[0]
    window_samples_1 = (window_seconds + 2 * window_buffer_seconds) * sr1
    window_samples_2 = (window_seconds + 2 * window_buffer_seconds) * sr2
    interval_samples_1 = interval_seconds * sr1
    interval_samples_2 = interval_seconds * sr2
    sr = 0
    resll = []

    # set day-aligned start-indices
    maxStartTime = max(tr1.stats.starttime, tr2.stats.starttime)
    dayAlignedStartTime = UTCDateTime(year=maxStartTime.year,
                                      month=maxStartTime.month,
                                      day=maxStartTime.day)
    itr1s = (dayAlignedStartTime - tr1.stats.starttime) * sr1
    itr2s = (dayAlignedStartTime - tr2.stats.starttime) * sr2

    if (resample_rate):
        sr1 = resample_rate
        sr2 = resample_rate
    # end if
    sr = max(sr1, sr2)
    xcorlen = int(2 * window_seconds * sr - 1)
    fftlen = 2**(int(np.log2(xcorlen)) + 1)

    intervalCount = 0
    windowsPerInterval = []  # Stores the number of windows processed per interval
    intervalStartSeconds = []
    intervalEndSeconds = []
    while itr1s < lentr1_all and itr2s < lentr2_all:
        itr1e = min(lentr1_all, itr1s + interval_samples_1)
        itr2e = min(lentr2_all, itr2s + interval_samples_2)

        while ((itr1s < 0) or (itr2s < 0)):
            itr1s += (window_samples_1 - 2*window_buffer_seconds*sr1_orig) - \
                     (window_samples_1 - 2*window_buffer_seconds*sr1_orig) * window_overlap
            itr2s += (window_samples_2 - 2*window_buffer_seconds*sr2_orig) - \
                     (window_samples_2 - 2*window_buffer_seconds*sr2_orig) * window_overlap
        # end while

        if (np.fabs(itr1e - itr1s) < sr1_orig
                or np.fabs(itr2e - itr2s) < sr2_orig):
            itr1s = itr1e
            itr2s = itr2e
            continue
        # end if

        if (tr1.stats.starttime + itr1s / sr1_orig !=
                tr2.stats.starttime + itr2s / sr2_orig):
            if (logger): logger.warning('Detected misaligned traces..')

        windowCount = 0
        wtr1s = int(itr1s)
        wtr2s = int(itr2s)
        resl = []

        while wtr1s < itr1e and wtr2s < itr2e:
            wtr1e = int(min(itr1e, wtr1s + window_samples_1))
            wtr2e = int(min(itr2e, wtr2s + window_samples_2))

            # Discard small windows
            if ((wtr1e - wtr1s < window_samples_1)
                    or (wtr2e - wtr2s < window_samples_2)
                    or (wtr1e - wtr1s < sr1_orig)
                    or (wtr2e - wtr2s < sr2_orig)):
                wtr1s = int(np.ceil(itr1e))
                wtr2s = int(np.ceil(itr2e))
                continue
            # end if

            # Discard windows with masked regions, i.e. with gaps or windows that are all zeros
            if (not (np.ma.is_masked(tr1_d_all[wtr1s:wtr1e])
                     or np.ma.is_masked(tr2_d_all[wtr2s:wtr2e])
                     or np.sum(tr1_d_all[wtr1s:wtr1e]) == 0
                     or np.sum(tr2_d_all[wtr2s:wtr2e]) == 0)):

                #logger.info('%s, %s' % (tr1.stats.starttime + wtr1s / 200., tr1.stats.starttime + wtr1e / sr1_orig))
                #logger.info('%s, %s' % (tr2.stats.starttime + wtr2s / 200., tr2.stats.starttime + wtr2e / sr2_orig))

                tr1_d = np.array(tr1_d_all[wtr1s:wtr1e], dtype=np.float32)
                tr2_d = np.array(tr2_d_all[wtr2s:wtr2e], dtype=np.float32)

                # STEP 1: detrend
                tr1_d = signal.detrend(tr1_d)
                tr2_d = signal.detrend(tr2_d)

                # STEP 2: demean
                tr1_d -= np.mean(tr1_d)
                tr2_d -= np.mean(tr2_d)

                # STEP 3: remove response
                if (sta1_inv):
                    resp_tr1 = Trace(
                        data=tr1_d,
                        header=Stats(header={
                            'sampling_rate': sr1_orig,
                            'npts': len(tr1_d),
                            'network': tr1.stats.network,
                            'station': tr1.stats.station,
                            'location': tr1.stats.location,
                            'channel': tr1.stats.channel,
                            'starttime': tr1.stats.starttime + float(wtr1s) / sr1_orig,
                            'endtime': tr1.stats.starttime + float(wtr1e) / sr1_orig
                        }))
                    try:
                        resp_tr1.remove_response(
                            inventory=sta1_inv,
                            output=instrument_response_output.upper(),
                            water_level=water_level)
                    except Exception as e:
                        print(e)
                    # end try

                    tr1_d = resp_tr1.data
                # end if

                # remove response
                if (sta2_inv):
                    resp_tr2 = Trace(
                        data=tr2_d,
                        header=Stats(header={
                            'sampling_rate': sr2_orig,
                            'npts': len(tr2_d),
                            'network': tr2.stats.network,
                            'station': tr2.stats.station,
                            'location': tr2.stats.location,
                            'channel': tr2.stats.channel,
                            'starttime': tr2.stats.starttime + float(wtr2s) / sr2_orig,
                            'endtime': tr2.stats.starttime + float(wtr2e) / sr2_orig
                        }))
                    try:
                        resp_tr2.remove_response(
                            inventory=sta2_inv,
                            output=instrument_response_output.upper(),
                            water_level=water_level)
                    except Exception as e:
                        print(e)
                    # end try

                    tr2_d = resp_tr2.data
                # end if

                # STEPS 4, 5: resample after lowpass @ resample_rate/2 Hz
                if (resample_rate):
                    tr1_d = lowpass(tr1_d,
                                    resample_rate / 2.,
                                    sr1_orig,
                                    corners=2,
                                    zerophase=True)
                    tr2_d = lowpass(tr2_d,
                                    resample_rate / 2.,
                                    sr2_orig,
                                    corners=2,
                                    zerophase=True)

                    tr1_d = Trace(
                        data=tr1_d,
                        header=Stats(header={
                            'sampling_rate': sr1_orig,
                            'npts': window_samples_1
                        })).resample(resample_rate, no_filter=True).data
                    tr2_d = Trace(
                        data=tr2_d,
                        header=Stats(header={
                            'sampling_rate': sr2_orig,
                            'npts': window_samples_2
                        })).resample(resample_rate, no_filter=True).data
                # end if

                # STEP 6: Bandpass
                if (flo and fhi):
                    tr1_d = bandpass(tr1_d,
                                     flo,
                                     fhi,
                                     sr1,
                                     corners=2,
                                     zerophase=True)
                    tr2_d = bandpass(tr2_d,
                                     flo,
                                     fhi,
                                     sr2,
                                     corners=2,
                                     zerophase=True)
                # end if

                # STEP 7: time-domain normalization
                # clip to +/- 2*std
                if (clip_to_2std):
                    std_tr1 = np.std(tr1_d)
                    std_tr2 = np.std(tr2_d)
                    clip_indices_tr1 = np.fabs(tr1_d) > 2 * std_tr1
                    clip_indices_tr2 = np.fabs(tr2_d) > 2 * std_tr2

                    tr1_d[clip_indices_tr1] = 2 * std_tr1 * np.sign(
                        tr1_d[clip_indices_tr1])
                    tr2_d[clip_indices_tr2] = 2 * std_tr2 * np.sign(
                        tr2_d[clip_indices_tr2])
                # end if

                # 1-bit normalization
                if (one_bit_normalize):
                    tr1_d = np.sign(tr1_d)
                    tr2_d = np.sign(tr2_d)
                # end if

                # Apply Rhys Hawkins-style default time domain normalization
                if (clip_to_2std == 0 and one_bit_normalize == 0):
                    # 0-mean
                    tr1_d -= np.mean(tr1_d)
                    tr2_d -= np.mean(tr2_d)

                    # unit-std
                    tr1_d /= np.std(tr1_d)
                    tr2_d /= np.std(tr2_d)
                # end if

                # STEP 8: taper
                if (adjusted_taper_length > 0):
                    tr1_d = taper(
                        tr1_d,
                        int(np.round(adjusted_taper_length * tr1_d.shape[0])))
                    tr2_d = taper(
                        tr2_d,
                        int(np.round(adjusted_taper_length * tr2_d.shape[0])))
                # end if

                # STEP 9: spectral whitening
                if (whitening):
                    tr1_d = whiten(tr1_d,
                                   sr1,
                                   window_freq=whitening_window_frequency)
                    tr2_d = whiten(tr2_d,
                                   sr2,
                                   window_freq=whitening_window_frequency)

                    # STEP 10: taper
                    if (adjusted_taper_length > 0):
                        tr1_d = taper(
                            tr1_d,
                            int(
                                np.round(adjusted_taper_length *
                                         tr1_d.shape[0])))
                        tr2_d = taper(
                            tr2_d,
                            int(
                                np.round(adjusted_taper_length *
                                         tr2_d.shape[0])))
                    # end if
                # end if

                # STEP 11: Final bandpass
                # apply zero-phase bandpass
                if (flo and fhi):
                    tr1_d = bandpass(tr1_d,
                                     flo,
                                     fhi,
                                     sr1,
                                     corners=2,
                                     zerophase=True)
                    tr2_d = bandpass(tr2_d,
                                     flo,
                                     fhi,
                                     sr2,
                                     corners=2,
                                     zerophase=True)
                # end if

                if (window_buffer_seconds):
                    # extract window of interest from buffered window
                    tr1_d = tr1_d[int(window_buffer_seconds *
                                      sr1):-int(window_buffer_seconds * sr1)]
                    tr2_d = tr2_d[int(window_buffer_seconds *
                                      sr2):-int(window_buffer_seconds * sr2)]
                # end if

                # cross-correlate waveforms
                if (sr1 < sr2):
                    fftlen2 = fftlen
                    fftlen1 = int((fftlen2 * 1.0 * sr1) / sr)
                    rf = zeropad_ba(
                        fftn(zeropad(tr1_d, fftlen1), shape=[fftlen1]),
                        fftlen2) * fftn(zeropad(ndflip(tr2_d), fftlen2),
                                        shape=[fftlen2])
                elif (sr1 > sr2):
                    fftlen1 = fftlen
                    fftlen2 = int((fftlen1 * 1.0 * sr2) / sr)
                    rf = fftn(zeropad(tr1_d, fftlen1),
                              shape=[fftlen1]) * zeropad_ba(
                                  fftn(zeropad(ndflip(tr2_d), fftlen2),
                                       shape=[fftlen2]), fftlen1)
                else:
                    rf = fftn(zeropad(tr1_d, fftlen), shape=[fftlen]) * fftn(
                        zeropad(ndflip(tr2_d), fftlen), shape=[fftlen])
                # end if

                if (not np.isnan(rf).any()):
                    resl.append(rf)
                    windowCount += 1
                # end if
            # end if

            wtr1s += int(
                (window_samples_1 - 2 * window_buffer_seconds * sr1_orig) -
                (window_samples_1 - 2 * window_buffer_seconds * sr1_orig) *
                window_overlap)
            wtr2s += int(
                (window_samples_2 - 2 * window_buffer_seconds * sr2_orig) -
                (window_samples_2 - 2 * window_buffer_seconds * sr2_orig) *
                window_overlap)
        # end while (windows within interval)

        if (verbose > 1):
            if (logger):
                logger.info('\tProcessed %d windows in interval %d' %
                            (windowCount, intervalCount))
        # end if

        intervalStartSeconds.append(itr1s / sr1_orig +
                                    tr1.stats.starttime.timestamp)
        intervalEndSeconds.append(itr1e / sr1_orig +
                                  tr1.stats.starttime.timestamp)
        itr1s = itr1e
        itr2s = itr2e
        intervalCount += 1

        # Append an array of zeros if no windows were processed for the current interval
        if (windowCount == 0):
            resl.append(np.zeros(fftlen))
            if (verbose > 1):
                if (logger):
                    logger.info(
                        '\tWarning: No windows processed due to gaps in data in current interval'
                    )
            # end if
        # end if

        windowsPerInterval.append(windowCount)

        if (windowCount > 0):
            mean = reduce((lambda tx, ty: tx + ty), resl) / float(windowCount)
        else:
            mean = reduce((lambda tx, ty: tx + ty), resl)
        # end if

        if (envelope_normalize):
            step = np.sign(np.fft.fftfreq(fftlen, 1.0 / sr))
            mean = mean + step * mean  # compute analytic
        # end if

        mean = ifftn(mean)

        if (envelope_normalize):
            # Compute magnitude of mean
            mean = np.abs(mean)
            normFactor = np.max(mean)

            # mean can be 0 for a null result
            if (normFactor > 0):
                mean /= normFactor
            # end if
        # end if

        resll.append(mean[:xcorlen])
    # end while (iteration over intervals)

    if (len(resll)):
        return np.array(resll), np.array(windowsPerInterval), \
               np.array(intervalStartSeconds, dtype='i8'), \
               np.array(intervalEndSeconds, dtype='i8'), \
               sr
    else:
        return None, None, None, None, None
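A hedged usage sketch for xcorr2: tr1 and tr2 stand for two overlapping ObsPy Trace objects obtained elsewhere, and every parameter value below is illustrative rather than taken from the original code.

xcor, wins_per_interval, interval_start, interval_end, sr = xcorr2(
    tr1, tr2,
    window_seconds=3600,        # hour-long windows ...
    interval_seconds=86400,     # ... stacked over day-long intervals
    resample_rate=10.0,
    flo=0.1, fhi=1.0,           # bandpass corners in Hz
    clip_to_2std=True,
    verbose=0)
if xcor is not None:
    # one stacked cross-correlation per interval
    print(xcor.shape, wins_per_interval)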
Beispiel #39
0
def readTSPAIR(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII TSPAIR file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the headers. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/tspair.ascii')
    """
    fh = open(filename, "rt")
    # read file and split text into channels
    headers = {}
    key = None
    for line in fh:
        if line.isspace():
            # blank line
            continue
        elif line.startswith("TIMESERIES"):
            # new header line
            key = line
            headers[key] = StringIO()
        elif headonly:
            # skip data for option headonly
            continue
        elif key:
            # data entry - may be written in multiple columns
            headers[key].write(line.strip().split()[-1] + " ")
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    for header, data in headers.items():
        # create Stats
        stats = Stats()
        parts = header.replace(",", "").split()
        temp = parts[1].split("_")
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({"dataquality": temp[4]})
        stats.ascii = AttribDict({"unit": parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
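For orientation, a hedged illustration of the 'TIMESERIES' header line this reader parses and how the split parts map onto the Stats fields set above; the header values are made up.

line = ('TIMESERIES BW_RJOB__EHZ_D, 6001 samples, 200 sps, '
        '2009-08-24T00:20:03.000000, TSPAIR, INTEGER, Counts')
parts = line.replace(',', '').split()
net, sta, loc, cha, quality = parts[1].split('_')  # 'BW', 'RJOB', '', 'EHZ', 'D'
npts = parts[2]            # '6001'
sampling_rate = parts[4]   # '200'
starttime = parts[6]       # '2009-08-24T00:20:03.000000'
data_format = parts[8]     # 'INTEGER' -> passed to _parse_data
unit = parts[-1]           # 'Counts'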
Beispiel #40
0
 def test_delta_zero(self):
     """
     Make sure you can set delta = 0. for #1989
     """
     stat = Stats()
     stat.delta = 0
Beispiel #41
0
def create_esm_acc(folder, event, station, num):
    """

    ESM recordings can be stored in advance or downloaded automatically from
    the internet using a token file (:code:`token.txt`). To obtain the token file,
    first register at `https://esm-db.eu/` and then run the command::

        curl -X POST -F 'message={"user_email": "email","user_password": "******"}
        ' "https://esm-db.eu/esmws/generate-signed-message/1/query" > token.txt

    """

    # Import libraries
    import glob
    from obspy.core import Stats
    import numpy as np
    import os
    from zipfile import ZipFile
    import requests
    import sys

    if not os.path.isdir(folder):
        zip_output = 'output_' + str(num) + '.zip'

        params = (
            ('eventid', event),
            ('data-type', 'ACC'),
            ('station', station),
            ('format', 'ascii'),
        )

        files = {
            'message': ('path/to/token.txt', open('token.txt', 'rb')),
        }

        headers = {'Authorization': 'token {}'.format('token.txt')}

        url = 'https://esm-db.eu/esmws/eventdata/1/query'

        req = requests.post(url=url, params=params, files=files)

        if req.status_code == 200:
            with open(zip_output, "wb") as zf:
                zf.write(req.content)
        else:
            if req.status_code == 403:
                sys.exit(
                    'Problem with ESM download. Maybe the token is no longer valid'
                )
            else:
                sys.exit('Problem with ESM download. Status code: ' +
                         str(req.status_code))

        with ZipFile(zip_output, 'r') as zipObj:
            zipObj.extractall(folder)
        os.remove(zip_output)

    time1 = []
    time2 = []
    inp_acc1 = []
    inp_acc2 = []
    npts1 = []
    npts2 = []

    filename_in = ''
    for i in range(1, 3):
        if folder.find('ESM/GR') > -1:
            file_ew = folder + '/*2.D.*'
            file_ns = folder + '/*3.D.*'
        else:
            file_ew = folder + '/*E.D.*'
            file_ns = folder + '/*N.D.*'
        if i == 1:
            filename_in = glob.glob(file_ew)[0]
        if i == 2:
            filename_in = glob.glob(file_ns)[0]

        headers = {}

        # read file
        fh = open(filename_in, 'rt')
        for j in range(64):
            key, value = fh.readline().strip().split(':', 1)
            headers[key.strip()] = value.strip()

        header = Stats()

        header['dyna'] = {}

        header['network'] = headers['NETWORK']
        header['station'] = headers['STATION_CODE']
        header['location'] = headers['LOCATION']
        header['channel'] = headers['STREAM']
        try:
            # use toUTCDateTime to convert from DYNA format
            header['starttime'] \
                = to_utc_date_time(headers
                                   ['DATE_TIME_FIRST_SAMPLE_YYYYMMDD_HHMMSS'])
        except ValueError:
            header['starttime'] = to_utc_date_time('19700101_000000')
        header['sampling_rate'] = 1 / float(headers['SAMPLING_INTERVAL_S'])
        header['delta'] = float(headers['SAMPLING_INTERVAL_S'])
        header['npts'] = int(headers['NDATA'])
        header['calib'] = 1  # not in file header

        # DYNA dict float data
        header['dyna']['EVENT_LATITUDE_DEGREE'] = strtofloat(
            headers['EVENT_LATITUDE_DEGREE'])
        header['dyna']['EVENT_LONGITUDE_DEGREE'] = strtofloat(
            headers['EVENT_LONGITUDE_DEGREE'])
        header['dyna']['EVENT_DEPTH_KM'] = strtofloat(
            headers['EVENT_DEPTH_KM'])
        header['dyna']['HYPOCENTER_REFERENCE'] = headers[
            'HYPOCENTER_REFERENCE']
        header['dyna']['MAGNITUDE_W'] = strtofloat(headers['MAGNITUDE_W'])
        header['dyna']['MAGNITUDE_L'] = strtofloat(headers['MAGNITUDE_L'])
        header['dyna']['STATION_LATITUDE_DEGREE'] = strtofloat(
            headers['STATION_LATITUDE_DEGREE'])
        header['dyna']['STATION_LONGITUDE_DEGREE'] = strtofloat(
            headers['STATION_LONGITUDE_DEGREE'])
        header['dyna']['VS30_M_S'] = strtofloat(headers['VS30_M/S'])
        header['dyna']['EPICENTRAL_DISTANCE_KM'] = strtofloat(
            headers['EPICENTRAL_DISTANCE_KM'])
        header['dyna']['EARTHQUAKE_BACKAZIMUTH_DEGREE'] = strtofloat(
            headers['EARTHQUAKE_BACKAZIMUTH_DEGREE'])
        header['dyna']['DURATION_S'] = strtofloat(headers['DURATION_S'])
        header['dyna']['INSTRUMENTAL_FREQUENCY_HZ'] = strtofloat(
            headers['INSTRUMENTAL_FREQUENCY_HZ'])
        header['dyna']['INSTRUMENTAL_DAMPING'] = strtofloat(
            headers['INSTRUMENTAL_DAMPING'])
        header['dyna']['FULL_SCALE_G'] = strtofloat(headers['FULL_SCALE_G'])

        # data type is acceleration
        if headers['DATA_TYPE'] == "ACCELERATION" \
                or headers['DATA_TYPE'] == "ACCELERATION RESPONSE SPECTRUM":
            header['dyna']['PGA_CM_S_2'] = strtofloat(headers['PGA_CM/S^2'])
            header['dyna']['TIME_PGA_S'] = strtofloat(headers['TIME_PGA_S'])
        # data type is velocity
        if headers['DATA_TYPE'] == "VELOCITY" \
                or headers['DATA_TYPE'] == "PSEUDO-VELOCITY RESPONSE SPECTRUM":
            header['dyna']['PGV_CM_S'] = strtofloat(headers['PGV_CM/S'])
            header['dyna']['TIME_PGV_S'] = strtofloat(headers['TIME_PGV_S'])
        # data type is displacement
        if headers['DATA_TYPE'] == "DISPLACEMENT" \
                or headers['DATA_TYPE'] == "DISPLACEMENT RESPONSE SPECTRUM":
            header['dyna']['PGD_CM'] = strtofloat(headers['PGD_CM'])
            header['dyna']['TIME_PGD_S'] = strtofloat(headers['TIME_PGD_S'])

        header['dyna']['LOW_CUT_FREQUENCY_HZ'] = strtofloat(
            headers['LOW_CUT_FREQUENCY_HZ'])
        header['dyna']['HIGH_CUT_FREQUENCY_HZ'] = strtofloat(
            headers['HIGH_CUT_FREQUENCY_HZ'])

        # DYNA dict int data
        header['dyna']['STATION_ELEVATION_M'] = strtoint(
            headers['STATION_ELEVATION_M'])
        header['dyna']['SENSOR_DEPTH_M'] = strtoint(headers['SENSOR_DEPTH_M'])
        header['dyna']['N_BIT_DIGITAL_CONVERTER'] = strtoint(
            headers['N_BIT_DIGITAL_CONVERTER'])
        header['dyna']['FILTER_ORDER'] = strtoint(headers['FILTER_ORDER'])

        # DYNA dict string data
        header['dyna']['EVENT_NAME'] = headers['EVENT_NAME']
        header['dyna']['EVENT_ID'] = headers['EVENT_ID']
        header['dyna']['EVENT_DATE_YYYYMMDD'] = headers['EVENT_DATE_YYYYMMDD']
        header['dyna']['EVENT_TIME_HHMMSS'] = headers['EVENT_TIME_HHMMSS']
        header['dyna']['MAGNITUDE_W_REFERENCE'] = headers[
            'MAGNITUDE_W_REFERENCE']
        header['dyna']['MAGNITUDE_L_REFERENCE'] = headers[
            'MAGNITUDE_L_REFERENCE']
        header['dyna']['FOCAL_MECHANISM'] = headers['FOCAL_MECHANISM']
        header['dyna']['STATION_NAME'] = headers['STATION_NAME']
        header['dyna']['SITE_CLASSIFICATION_EC8'] = headers[
            'SITE_CLASSIFICATION_EC8']
        header['dyna']['MORPHOLOGIC_CLASSIFICATION'] = headers[
            'MORPHOLOGIC_CLASSIFICATION']
        header['dyna']['DATE_TIME_FIRST_SAMPLE_PRECISION'] = headers[
            'DATE_TIME_FIRST_SAMPLE_PRECISION']
        header['dyna']['UNITS'] = headers['UNITS']
        header['dyna']['INSTRUMENT'] = headers['INSTRUMENT']
        header['dyna']['INSTRUMENT_ANALOG_DIGITAL'] = headers[
            'INSTRUMENT_ANALOG/DIGITAL']
        header['dyna']['BASELINE_CORRECTION'] = headers['BASELINE_CORRECTION']
        header['dyna']['FILTER_TYPE'] = headers['FILTER_TYPE']
        header['dyna']['LATE_NORMAL_TRIGGERED'] = headers[
            'LATE/NORMAL_TRIGGERED']
        header['dyna']['HEADER_FORMAT'] = headers['HEADER_FORMAT']
        header['dyna']['DATABASE_VERSION'] = headers['DATABASE_VERSION']
        header['dyna']['DATA_TYPE'] = headers['DATA_TYPE']
        header['dyna']['PROCESSING'] = headers['PROCESSING']
        header['dyna']['DATA_LICENSE'] = headers['DATA_LICENSE']
        header['dyna']['DATA_TIMESTAMP_YYYYMMDD_HHMMSS'] = headers[
            'DATA_TIMESTAMP_YYYYMMDD_HHMMSS']
        header['dyna']['DATA_CITATION'] = headers['DATA_CITATION']
        header['dyna']['DATA_CREATOR'] = headers['DATA_CREATOR']
        header['dyna']['ORIGINAL_DATA_MEDIATOR_CITATION'] = headers[
            'ORIGINAL_DATA_MEDIATOR_CITATION']
        header['dyna']['ORIGINAL_DATA_MEDIATOR'] = headers[
            'ORIGINAL_DATA_MEDIATOR']
        header['dyna']['ORIGINAL_DATA_CREATOR_CITATION'] = headers[
            'ORIGINAL_DATA_CREATOR_CITATION']
        header['dyna']['ORIGINAL_DATA_CREATOR'] = headers[
            'ORIGINAL_DATA_CREATOR']
        header['dyna']['USER1'] = headers['USER1']
        header['dyna']['USER2'] = headers['USER2']
        header['dyna']['USER3'] = headers['USER3']
        header['dyna']['USER4'] = headers['USER4']
        header['dyna']['USER5'] = headers['USER5']

        # read data
        acc_data = np.loadtxt(fh, dtype='float32')
        fh.close()

        time = []
        for j in range(0, header['npts']):
            t = j * header['delta']
            time.append(t)

        if i == 1:
            inp_acc1 = np.asarray(acc_data) / 981  # in g
            # comp1=header['channel']
            npts1 = header['npts']
            time1 = time
        if i == 2:
            inp_acc2 = np.asarray(acc_data) / 981  # in g
            # comp2=header['channel']
            npts2 = header['npts']
            time2 = time

    return time1, time2, inp_acc1, inp_acc2, npts1, npts2
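A hedged usage sketch of create_esm_acc; the folder, event and station identifiers below are placeholders only.

time1, time2, acc_ew, acc_ns, npts_ew, npts_ns = create_esm_acc(
    folder='ESM/Record_1',              # created/filled on first call
    event='EMSC-20160824_0000006',      # placeholder ESM event id
    station='NRC',                      # placeholder station code
    num=1)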
Beispiel #42
0
def _read_tspair(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII TSPAIR file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the headers. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/tspair.ascii')
    """
    with open(filename, 'rt') as fh:
        # read file and split text into channels
        buf = []
        key = False
        for line in fh:
            if line.isspace():
                # blank line
                continue
            elif line.startswith('TIMESERIES'):
                # new header line
                key = True
                buf.append((line, io.StringIO()))
            elif headonly:
                # skip data for option headonly
                continue
            elif key:
                # data entry - may be written in multiple columns
                buf[-1][1].write(line.strip().split()[-1] + ' ')
    # create ObsPy stream object
    stream = Stream()
    for header, data in buf:
        # create Stats
        stats = Stats()
        parts = header.replace(',', '').split()
        temp = parts[1].split('_')
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        # don't put blank quality code into 'mseed' dictionary
        # (quality code is mentioned as optional by format specs anyway)
        if temp[4]:
            stats.mseed = AttribDict({'dataquality': temp[4]})
        stats.ascii = AttribDict({'unit': parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
Beispiel #43
0
def _single_corr_trace_to_obspy_trace(trace):
    """ Convert a correlation trace dictionary to an obspy trace.

    Convert a single correlation trace into an
    :class:`~obspy.core.trace.Trace` object.

    :type corr_trace: dictionary of type correlation trace
    :param corr_trace: input data to be converted

    :rtype: :class:`~obspy.core.trace.Trace`
    :return: **tr**: the obspy object containing the data
    """

    tr = Trace(data=np.squeeze(trace['corr_trace']))
    stats_keys = ['network', 'station', 'location',
                  'channel', 'npts', 'sampling_rate']

    sac_keys = ['baz', 'az', 'stla', 'stlo', 'stel',
                'evla', 'evlo', 'evel', 'dist']

    # copy stats
    for key in stats_keys:
        try:
            tr.stats[key] = trace['stats'][key]
        except:
            print('Error copying key: %s' % key)
            raise
    
    # special keys
    tr.stats['starttime'] = UTCDateTime(
                              convert_time([trace['stats']['starttime']])[0])

    # test for presence of geo information
    flag = 0
    for key in sac_keys:
        if key not in trace['stats']:
            flag += 1
    if flag == 0:  # geo information present
        tr.stats['sac'] = {}
        for key in sac_keys:
            tr.stats['sac'][key] = trace['stats'][key]
            
    # copy stats_tr1
    if 'stats_tr1' in trace:
        tr.stats_tr1 = Stats()
        tr.stats_tr1['starttime'] = UTCDateTime(
                              convert_time([trace['stats_tr1']['starttime']])[0])
        for key in stats_keys:
            try:
                tr.stats_tr1[key] = trace['stats_tr1'][key]
            except:
                print('Error copying key: %s' % key)
                raise
        for key in sac_keys:
            try:
                tr.stats_tr1[key] = trace['stats_tr1'][key]
            except:
                pass

    # copy stats_tr2
    if 'stats_tr2' in trace:
        tr.stats_tr2 = Stats()
        tr.stats_tr2['starttime'] = UTCDateTime(
                              convert_time([trace['stats_tr2']['starttime']])[0])
        for key in stats_keys:
            try:
                tr.stats_tr2[key] = trace['stats_tr2'][key]
            except:
                print('Error copying key: %s' % key)
                raise
        for key in sac_keys:
            try:
                tr.stats_tr2[key] = trace['stats_tr2'][key]
            except:
                pass

    return tr
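A minimal, hedged sketch of the correlation-trace dictionary this converter expects, with the field layout inferred from the code above; the values are illustrative and the exact starttime encoding depends on the convert_time helper defined elsewhere.

import numpy as np

corr_trace = {
    'corr_trace': np.random.randn(1, 2001),
    'stats': {'network': 'BW-GR', 'station': 'RJOB-FUR', 'location': '',
              'channel': 'EHZ-EHZ', 'npts': 2001, 'sampling_rate': 10.0,
              'starttime': '2015-01-01 00:00:00.000'},  # format assumed
}
tr = _single_corr_trace_to_obspy_trace(corr_trace)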
Beispiel #44
0
def sac_to_obspy_header(sacheader):
    """
    Make an ObsPy Stats header dictionary from a SAC header dictionary.

    :param sacheader: SAC header dictionary.
    :type sacheader: dict

    :rtype: :class:`~obspy.core.Stats`
    :return: Filled ObsPy Stats header.

    """

    # 1. get required sac header values
    try:
        npts = sacheader['npts']
        delta = sacheader['delta']
    except KeyError:
        msg = "Incomplete SAC header information to build an ObsPy header."
        raise KeyError(msg)

    assert npts != HD.INULL
    assert delta != HD.FNULL
    #
    # 2. get time
    try:
        reftime = get_sac_reftime(sacheader)
    except (SacError, ValueError, TypeError):
        # ObsPy doesn't require a valid reftime
        reftime = UTCDateTime(0.0)

    b = sacheader.get('b', HD.FNULL)
    #
    # 3. get optional sac header values
    calib = sacheader.get('scale', HD.FNULL)
    kcmpnm = sacheader.get('kcmpnm', HD.SNULL)
    kstnm = sacheader.get('kstnm', HD.SNULL)
    knetwk = sacheader.get('knetwk', HD.SNULL)
    khole = sacheader.get('khole', HD.SNULL)
    #
    # 4. deal with null values
    b = b if (b != HD.FNULL) else 0.0
    calib = calib if (calib != HD.FNULL) else 1.0
    kcmpnm = kcmpnm if (kcmpnm != HD.SNULL) else ''
    kstnm = kstnm if (kstnm != HD.SNULL) else ''
    knetwk = knetwk if (knetwk != HD.SNULL) else ''
    khole = khole if (khole != HD.SNULL) else ''
    #
    # 5. transform to obspy values
    # nothing is null
    stats = {}
    stats['npts'] = npts
    stats['sampling_rate'] = np.float32(1.) / np.float32(delta)
    stats['network'] = _clean_str(knetwk)
    stats['station'] = _clean_str(kstnm)
    stats['channel'] = _clean_str(kcmpnm)
    stats['location'] = _clean_str(khole)
    stats['calib'] = calib

    # store _all_ provided SAC header values
    stats['sac'] = sacheader.copy()

    # get first sample absolute time as UTCDateTime
    # always add the begin time (if it's defined) to get the given
    # SAC reference time, no matter which iztype is given
    # b may be non-zero, even for iztype 'ib', especially if it was used to
    #   store microseconds from obspy_to_sac_header
    stats['starttime'] = UTCDateTime(reftime) + b

    return Stats(stats)
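A hedged usage sketch with a minimal SAC header dictionary; real SAC headers carry many more keys, and the reference-time fields below are only one valid combination.

sacheader = {'npts': 100, 'delta': 0.01,
             'knetwk': 'XX', 'kstnm': 'STA', 'kcmpnm': 'HHZ', 'khole': '',
             'nzyear': 2009, 'nzjday': 1, 'nzhour': 0, 'nzmin': 0,
             'nzsec': 0, 'nzmsec': 0, 'b': 0.0}
stats = sac_to_obspy_header(sacheader)
print(stats.starttime, stats.sampling_rate)  # 2009-01-01, ~100 Hz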
Beispiel #45
0
def _read_seisan(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a SEISAN file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEISAN file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/2001-01-13-1742-24S.KONO__004")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    4 Trace(s) in Stream:
    .KONO.0.B0Z | 2001-01-13T17:45:01.999000Z - ... | 20.0 Hz, 6000 samples
    .KONO.0.L0Z | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0N | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0E | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    """
    # get version info from event file header (at least 12*80 bytes)
    fh = open(filename, 'rb')
    data = fh.read(80 * 12)
    (byteorder, arch, version) = _get_version(data)
    dlen = arch // 8
    dtype = np.dtype(native_str(byteorder + 'i' + str(dlen)))
    stype = native_str('=i' + str(dlen))

    def _readline(fh, version=version, dtype=dtype):
        if version >= 7:
            # On Sun, Linux, MacOSX and PC from version 7.0 (using Digital
            # Fortran), every write is preceded and terminated with 4
            # additional bytes giving the number of bytes in the write.
            # With 64 bit systems, 8 bytes is used to define number of bytes
            # written.
            start_bytes = fh.read(dtype.itemsize)
            # convert to int32/int64
            length = np.fromstring(start_bytes, dtype=dtype)[0]
            data = fh.read(length)
            end_bytes = fh.read(dtype.itemsize)
            assert start_bytes == end_bytes
            return data
        else:  # version <= 6
            # Every write is preceded and terminated with one byte giving the
            # number of bytes in the write. If the write contains more than 128
            # bytes, it is blocked in records of 128 bytes, each with the start
            # and end byte which in this case is the number 128. Each record is
            # thus 130 bytes long.
            data = b''
            while True:
                start_byte = fh.read(1)
                if not start_byte:
                    # end of file
                    break
                # convert to unsigned int8
                length = np.fromstring(start_byte, np.uint8)[0]
                data += fh.read(length)
                end_byte = fh.read(1)
                assert start_byte == end_byte
                if length == 128:
                    # blocked data - repeat loop
                    continue
                # end of blocked data
                break
            return data

    # reset file pointer
    if version >= 7:
        fh.seek(0)
    else:
        # version <= 6 starts with first byte K
        fh.seek(1)
    # event file header
    # line 1
    data = _readline(fh)
    number_of_channels = int(data[30:33])
    # calculate number of lines with channels
    number_of_lines = number_of_channels // 3 + (number_of_channels % 3 and 1)
    if number_of_lines < 10:
        number_of_lines = 10
    # line 2 - always empty
    data = _readline(fh)
    # line 3
    for _i in range(0, number_of_lines):
        data = _readline(fh)
    # now parse each event file channel header + data
    stream = Stream()
    for _i in range(number_of_channels):
        # get channel header
        temp = _readline(fh).decode()
        # create Stats
        header = Stats()
        header['network'] = (temp[16] + temp[19]).strip()
        header['station'] = temp[0:5].strip()
        header['location'] = (temp[7] + temp[12]).strip()
        header['channel'] = (temp[5:7] + temp[8]).strip()
        header['sampling_rate'] = float(temp[36:43])
        header['npts'] = int(temp[43:50])
        # create start and end times
        year = int(temp[9:12]) + 1900
        month = int(temp[17:19])
        day = int(temp[20:22])
        hour = int(temp[23:25])
        mins = int(temp[26:28])
        secs = float(temp[29:35])
        header['starttime'] = UTCDateTime(year, month, day, hour, mins) + secs
        if headonly:
            # skip data
            from_buffer(_readline(fh), dtype=dtype)
            stream.append(Trace(header=header))
        else:
            # fetch data
            data = from_buffer(_readline(fh), dtype=dtype)
            # convert to system byte order
            data = np.require(data, stype)
            if header['npts'] != len(data):
                msg = "Mismatching byte size %d != %d"
                warnings.warn(msg % (header['npts'], len(data)))
            stream.append(Trace(data=data, header=header))
    fh.close()
    return stream
Beispiel #46
0
def readSLIST(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII SLIST file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read('/path/to/slist.ascii')
    """
    fh = open(filename, 'rt')
    # read file and split text into channels
    headers = {}
    key = None
    for line in fh:
        if line.isspace():
            # blank line
            continue
        elif line.startswith('TIMESERIES'):
            # new header line
            key = line
            headers[key] = StringIO()
        elif headonly:
            # skip data for option headonly
            continue
        elif key:
            # data entry - may be written in multiple columns
            headers[key].write(line.strip() + ' ')
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    for header, data in headers.items():
        # create Stats
        stats = Stats()
        parts = header.replace(',', '').split()
        temp = parts[1].split('_')
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({'dataquality': temp[4]})
        stats.ascii = AttribDict({'unit': parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            # parse data
            data.seek(0)
            if parts[8] == 'INTEGER':
                data = loadtxt(data, dtype='int', ndmin=1)
            elif parts[8] == 'FLOAT':
                data = loadtxt(data, dtype='float32', ndmin=1)
            else:
                raise NotImplementedError
            stream.append(Trace(data=data, header=stats))
    return stream
    #plt.savefig("lin688_similarity2.png", dpi=250)

    #trace = extract_trace(segy, 688, 390).data

    #%%
    ################################### Write data  ###########################
    #write
    from obspy.core import AttribDict
    from obspy.core import Stats

    from obspy.core import Trace, Stream
    from obspy.io.segy.segy import SEGYBinaryFileHeader
    from obspy.io.segy.segy import SEGYTraceHeader

    out = Stream(Trace(t, header=dict(delta=dt)) for t in data)
    out.stats = Stats(dict(textual_file_header=segy.textual_file_header))
    #    out.stats.textual_file_header += """energy volume.
    #    Generated: 18 Sep 2016 by Matt Hall [email protected].
    #    Algorithm: github.com/agile-geoscience/bruges.attribute.similarity.
    #    Parameters: duration=0.16 s, dt=0.002 s.""".encode('utf-8')

    out.write('out1.sgy', format='SEGY', data_encoding=1)
    # data_encoding: 1 for IBM, 5 for IEEE
    #%%
    #####################  Validate the output data  ##########################
    s = _read_segy('out1.sgy', headonly=True)
    s.textual_file_header[:15 * 80]  # First 15 lines.
    s.traces[0].header
    segy.traces[0].data.shape
    d = np.arange(751)
    def test_ppsd_restricted_stacks(self):
        """
        Test PPSD.calculate_histogram() with restrictions to what data should
        be stacked. Also includes image tests.
        """
        # set up a bogus PPSD, with fixed random psds but with real start times
        # of psd pieces, to facilitate testing the stack selection.
        ppsd = PPSD(stats=Stats(dict(sampling_rate=150)),
                    metadata=None,
                    db_bins=(-200, -50, 20.),
                    period_step_octaves=1.4)
        # change data to nowadays used nanoseconds POSIX timestamp
        ppsd._times_processed = [
            UTCDateTime(t)._ns for t in np.load(
                os.path.join(self.path, "ppsd_times_processed.npy")).tolist()
        ]
        np.random.seed(1234)
        ppsd._binned_psds = [
            arr for arr in np.random.uniform(-200, -50, (
                len(ppsd._times_processed), len(ppsd.period_bin_centers)))
        ]

        # Test callback function that selects a fixed random set of the
        # timestamps.  Also checks that we get passed the type we expect,
        # which is 1D numpy ndarray of int type.
        def callback(t_array):
            self.assertIsInstance(t_array, np.ndarray)
            self.assertEqual(t_array.shape, (len(ppsd._times_processed), ))
            self.assertTrue(np.issubdtype(t_array.dtype, np.integer))
            np.random.seed(1234)
            res = np.random.randint(0, 2, len(t_array)).astype(bool)
            return res

        # test several different sets of stack criteria, should cover
        # everything, even with lots of combined criteria
        stack_criteria_list = [
            dict(starttime=UTCDateTime(2015, 3, 8), month=[2, 3, 5, 7, 8]),
            dict(endtime=UTCDateTime(2015, 6, 7),
                 year=[2015],
                 time_of_weekday=[(1, 0, 24), (2, 0, 24), (-1, 0, 11)]),
            dict(year=[2013, 2014, 2016, 2017], month=[2, 3, 4]),
            dict(month=[1, 2, 5, 6, 8], year=2015),
            dict(isoweek=[4, 5, 6, 13, 22, 23, 24, 44, 45]),
            dict(time_of_weekday=[(5, 22, 24), (6, 0, 2), (6, 22, 24)]),
            dict(callback=callback, month=[1, 3, 5, 7]),
            dict(callback=callback)
        ]
        expected_selections = np.load(
            os.path.join(self.path, "ppsd_stack_selections.npy"))

        # test every set of criteria
        for stack_criteria, expected_selection in zip(stack_criteria_list,
                                                      expected_selections):
            selection_got = ppsd._stack_selection(**stack_criteria)
            np.testing.assert_array_equal(selection_got, expected_selection)

        # test one particular selection as an image test
        plot_kwargs = dict(max_percentage=15,
                           xaxis_frequency=True,
                           period_lim=(0.01, 50))
        ppsd.calculate_histogram(**stack_criteria_list[1])
        with ImageComparison(self.path_images,
                             'ppsd_restricted_stack.png',
                             reltol=1.5) as ic:
            fig = ppsd.plot(show=False, **plot_kwargs)
            # some matplotlib/Python version combinations lack the left-most
            # tick/label "Jan 2015". Try to circumvent and get the (otherwise
            # OK) test by changing the left x limit a bit further out (by two
            # days, axis is in mpl days). See e.g.
            # https://tests.obspy.org/30657/#1
            fig.axes[1].set_xlim(left=fig.axes[1].get_xlim()[0] - 2)
            with np.errstate(under='ignore'):
                fig.savefig(ic.name)

        # test it again, checking that updating an existing plot with different
        # stack selection works..
        #  a) we start with the stack for the expected image and test that it
        #     matches (like above):
        ppsd.calculate_histogram(**stack_criteria_list[1])
        with ImageComparison(self.path_images,
                             'ppsd_restricted_stack.png',
                             reltol=1.5,
                             plt_close_all_exit=False) as ic:
            fig = ppsd.plot(show=False, **plot_kwargs)
            # some matplotlib/Python version combinations lack the left-most
            # tick/label "Jan 2015". Try to circumvent and get the (otherwise
            # OK) test by changing the left x limit a bit further out (by two
            # days, axis is in mpl days). See e.g.
            # https://tests.obspy.org/30657/#1
            fig.axes[1].set_xlim(left=fig.axes[1].get_xlim()[0] - 2)
            with np.errstate(under='ignore'):
                fig.savefig(ic.name)
        #  b) now reuse figure and set the histogram with a different stack,
        #     image test should fail:
        ppsd.calculate_histogram(**stack_criteria_list[3])
        try:
            with ImageComparison(self.path_images,
                                 'ppsd_restricted_stack.png',
                                 adjust_tolerance=False,
                                 plt_close_all_enter=False,
                                 plt_close_all_exit=False) as ic:
                # rms of the valid comparison above is ~31,
                # rms of the invalid comparison we test here is ~36
                if MATPLOTLIB_VERSION == [1, 1, 1]:
                    ic.tol = 33
                ppsd._plot_histogram(fig=fig, draw=True)
                with np.errstate(under='ignore'):
                    fig.savefig(ic.name)
        except ImageComparisonException:
            pass
        else:
            msg = "Expected ImageComparisonException was not raised."
            self.fail(msg)
        #  c) now reuse figure and set the original histogram stack again,
        #     image test should pass again:
        ppsd.calculate_histogram(**stack_criteria_list[1])
        with ImageComparison(self.path_images,
                             'ppsd_restricted_stack.png',
                             reltol=1.5,
                             plt_close_all_enter=False) as ic:
            ppsd._plot_histogram(fig=fig, draw=True)
            with np.errstate(under='ignore'):
                fig.savefig(ic.name)
Beispiel #49
0
 def test_delta_zero(self):
     """
     Make sure you can set delta = 0. for #1989
     """
     stat = Stats()
     stat.delta = 0
Beispiel #50
0
def readSLIST(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII SLIST file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/slist.ascii')
    """
    with open(filename, 'rt') as fh:
        # read file and split text into channels
        buf = []
        key = False
        for line in fh:
            if line.isspace():
                # blank line
                continue
            elif line.startswith('TIMESERIES'):
                # new header line
                key = True
                buf.append((line, StringIO()))
            elif headonly:
                # skip data for option headonly
                continue
            elif key:
                # data entry - may be written in multiple columns
                buf[-1][1].write(line.strip() + ' ')
    # create ObsPy stream object
    stream = Stream()
    for header, data in buf:
        # create Stats
        stats = Stats()
        parts = header.replace(',', '').split()
        temp = parts[1].split('_')
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({'dataquality': temp[4]})
        stats.ascii = AttribDict({'unit': parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
Beispiel #51
0
def combine_stats(tr1, tr2):
    """ Combine the meta-information of two ObsPy Trace objects

    This function returns an ObsPy :class:`~obspy.core.trace.Stats` object
    obtained by combining the two Stats objects associated with the input
    Traces, namely ``tr1.stats`` and ``tr2.stats``.

    The fields ['network','station','location','channel'] are combined in
    a ``-`` separated fashion to create a "pseudo" SEED like ``id``.

    For all other fields, only "common" information is retained: only keywords
    that exist in both dictionaries, with equal values, are included in the
    resulting object.

    :type tr1: :class:`~obspy.core.trace.Trace`
    :param tr1: First Trace
    :type tr2: :class:`~obspy.core.trace.Trace`
    :param tr2: Second Trace

    :rtype: :class:`~obspy.core.trace.Stats`
    :return: **stats**: combined Stats object
    """

    if not isinstance(tr1, Trace):
        raise TypeError("tr1 must be an obspy Trace object.")

    if not isinstance(tr2, Trace):
        raise TypeError("tr2 must be an obspy Trace object.")

    tr1_keys = tr1.stats.keys()
    tr2_keys = tr2.stats.keys()

    stats = Stats()

    # Adjust the information to create a new SEED like id
    keywords = ['network', 'station', 'location', 'channel']
    sac_keywords = ['sac']

    for key in keywords:
        if key in tr1_keys and key in tr2_keys:
            stats[key] = tr1.stats[key] + '-' + tr2.stats[key]

    for key in tr1_keys:
        if key not in keywords and key not in sac_keywords:
            if key in tr2_keys:
                if tr1.stats[key] == tr2.stats[key]:
                    # in the stats object there are read only objects
                    try:
                        stats[key] = tr1.stats[key]
                    except AttributeError:
                        pass

    try:
        stats['sac'] = {}
        stats.sac['stla'] = tr1.stats.sac.stla
        stats.sac['stlo'] = tr1.stats.sac.stlo
        stats.sac['stel'] = tr1.stats.sac.stel
        stats.sac['evla'] = tr2.stats.sac.stla
        stats.sac['evlo'] = tr2.stats.sac.stlo
        stats.sac['evel'] = tr2.stats.sac.stel

        az, baz, dist = trace_calc_az_baz_dist(tr1, tr2)

        stats.sac['dist'] = dist / 1000
        stats.sac['az'] = az
        stats.sac['baz'] = baz
    except AttributeError:
        stats.pop('sac')
        print "No station coordinates provided."

    return stats
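A hedged usage sketch: ObsPy's bundled example stream provides traces without SAC coordinates, so combine_stats merges the common header fields and reports that no station coordinates are available.

from obspy import read

st = read()                          # ObsPy's bundled example stream
stats = combine_stats(st[0], st[1])
print(stats.network, stats.station)  # 'BW-BW', 'RJOB-RJOB'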