def read(self, file_p, length, endian, param): """ read data from file_p :param file_p: file pointer :param length: length to be read :param endian: endian type in datafile :type param: list :param param: sampling rate,sample size, block time, channels :rtype: list of list :return: list of data """ buff = file_p.read(length) samplerate = param[0] numbyte = param[1] numchan = param[3] num = (samplerate // 10) * numbyte * numchan if length != num: raise EvtBadDataError("Bad data length") if numbyte == 2: data = frombuffer(buff, ">h").reshape((-1, numchan)).T elif numbyte == 4: data = frombuffer(buff, ">i").reshape((-1, numchan)).T elif numbyte == 3: data = np.empty((numchan, samplerate // 10)) for j in range(samplerate // 10): for k in range(numchan): i = (j * numchan) + k val = unpack(b">i", buff[i * 3:(i * 3) + 3] + b'\0')[0] \ >> 8 data[k, j] = val return data
def getMSRecord(self):
    """
    Parse the raw miniSEED record bytes held in ``self.msrecord``.

    :return: the dereferenced ``MSRecord`` structure produced by libmseed.
    :raises SeedLinkException: if libmseed's ``msr_parse`` returns a
        non-zero error code.
    """
    # following from obspy.mseed.tests.test_libmseed.py -> test_msrParse
    msr = clibmseed.msr_init(C.POINTER(MSRecord)())
    # expose the record as a contiguous uint8 buffer so its raw address
    # can be handed to the C library without copying
    pyobj = frombuffer(self.msrecord, dtype=np.uint8)
    errcode = clibmseed.msr_parse(pyobj.ctypes.data_as(C.POINTER(C.c_char)),
                                  len(pyobj), C.pointer(msr), -1, 1, 1)
    if errcode != 0:
        msg = "failed to decode mini-seed record: msr_parse errcode: %s"
        raise SeedLinkException(msg % (errcode))
    # print "DEBUG: msr:", msr
    msrecord_py = msr.contents
    # print "DEBUG: msrecord_py:", msrecord_py
    return msrecord_py
def getMSRecord(self):
    """
    Parse the raw miniSEED record bytes held in ``self.msrecord``.

    :return: the dereferenced ``MSRecord`` structure produced by libmseed.
    :raises SeedLinkException: if libmseed's ``msr_parse`` returns a
        non-zero error code.
    """
    # following from obspy.mseed.tests.test_libmseed.py -> test_msrParse
    msr = clibmseed.msr_init(C.POINTER(MSRecord)())
    # expose the record as a contiguous uint8 buffer so its raw address
    # can be handed to the C library without copying
    pyobj = frombuffer(self.msrecord, dtype=np.uint8)
    errcode = \
        clibmseed.msr_parse(pyobj.ctypes.data_as(C.POINTER(C.c_char)),
                            len(pyobj), C.pointer(msr), -1, 1, 1)
    if errcode != 0:
        msg = "failed to decode mini-seed record: msr_parse errcode: %s"
        raise SeedLinkException(msg % (errcode))
    # print "DEBUG: msr:", msr
    msrecord_py = msr.contents
    # print "DEBUG: msrecord_py:", msrecord_py
    return msrecord_py
def readCSS(filename, **kwargs): """ Reads a CSS waveform file and returns a Stream object. .. warning:: This function should NOT be called directly, it registers via the ObsPy :func:`~obspy.core.stream.read` function, call this instead. :type filename: str :param filename: CSS file to be read. :rtype: :class:`~obspy.core.stream.Stream` :returns: Stream with Traces specified by given file. """ # read metafile with info on single traces with open(filename, "rb") as fh: lines = fh.readlines() basedir = os.path.dirname(filename) traces = [] # read single traces for line in lines: npts = int(line[79:87]) dirname = line[148:212].strip().decode() filename = line[213:245].strip().decode() filename = os.path.join(basedir, dirname, filename) offset = int(line[246:256]) dtype = DTYPE[line[143:145]] if isinstance(dtype, tuple): read_fmt = np.dtype(dtype[0]) fmt = dtype[1] else: read_fmt = np.dtype(dtype) fmt = read_fmt with open(filename, "rb") as fh: fh.seek(offset) data = fh.read(read_fmt.itemsize * npts) data = frombuffer(data, dtype=read_fmt) data = np.require(data, dtype=fmt) header = {} header['station'] = line[0:6].strip().decode() header['channel'] = line[7:15].strip().decode() header['starttime'] = UTCDateTime(float(line[16:33])) header['sampling_rate'] = float(line[88:99]) header['calib'] = float(line[100:116]) header['calper'] = float(line[117:133]) tr = Trace(data, header=header) traces.append(tr) return Stream(traces=traces)
def test_bugWriteReadFloat32SEEDWin32(self):
    """
    Test case for issue #64.
    """
    # reference float32 samples for the round trip
    samples = np.array([395.07809448, 395.0782, 1060.28112793,
                        -1157.37487793, -1236.56237793, 355.07028198,
                        -1181.42175293], dtype=np.float32)
    stream = Stream([Trace(data=samples)])
    with NamedTemporaryFile() as tf:
        tmp = tf.name
        writeMSEED(stream, tmp, format="MSEED")
        # bypass libmseed and pull the seven float32 samples straight
        # out of the record (data section starts at byte offset 56)
        with open(tmp, 'rb') as fp:
            fp.seek(56)
            sample_dtype = np.dtype('>f4')
            raw = frombuffer(fp.read(7 * sample_dtype.itemsize),
                             dtype=sample_dtype)
        np.testing.assert_array_equal(samples, raw)
        # cross-check the regular ObsPy read path as well
        round_tripped = readMSEED(tmp)
        np.testing.assert_array_equal(samples, round_tripped[0].data)
def test_bugWriteReadFloat32SEEDWin32(self):
    """
    Test case for issue #64.
    """
    # known float32 samples to write out and read back
    expected = np.array([
        395.07809448, 395.0782, 1060.28112793, -1157.37487793,
        -1236.56237793, 355.07028198, -1181.42175293
    ], dtype=np.float32)
    st = Stream([Trace(data=expected)])
    with NamedTemporaryFile() as tf:
        path = tf.name
        writeMSEED(st, path, format="MSEED")
        # inspect the raw record without going through libmseed;
        # the sample data begins at byte offset 56
        with open(path, 'rb') as fp:
            fp.seek(56)
            raw_dtype = np.dtype(native_str('>f4'))
            raw = frombuffer(fp.read(7 * raw_dtype.itemsize),
                             dtype=raw_dtype)
        np.testing.assert_array_equal(expected, raw)
        # and verify the normal ObsPy reader agrees
        reread = readMSEED(path)
        np.testing.assert_array_equal(expected, reread[0].data)
def readSEISAN(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a SEISAN file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEISAN file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, only the channel headers are parsed
        and the waveform data is skipped.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/2001-01-13-1742-24S.KONO__004")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    4 Trace(s) in Stream:
    .KONO.0.B0Z | 2001-01-13T17:45:01.999000Z - ... | 20.0 Hz, 6000 samples
    .KONO.0.L0Z | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0N | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0E | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    """
    def _readline(fh, length=80):
        # each logical line is wrapped in 4-byte markers on both sides
        # (Fortran-style records, presumably) -- return only the payload
        data = fh.read(length + 8)
        end = length + 4
        start = 4
        return data[start:end]
    # read data chunk from given file
    fh = open(filename, 'rb')
    data = fh.read(80 * 12)
    # get version info from file (byte order, architecture word size, ...)
    (byteorder, arch, _version) = _getVersion(data)
    # fetch lines
    fh.seek(0)
    # start with event file header
    # line 1
    data = _readline(fh)
    number_of_channels = int(data[30:33])
    # calculate number of lines with channels (3 channels per line,
    # rounded up, but never fewer than 10 lines)
    number_of_lines = number_of_channels // 3 + (number_of_channels % 3 and 1)
    if number_of_lines < 10:
        number_of_lines = 10
    # line 2
    data = _readline(fh)
    # line 3
    for _i in range(0, number_of_lines):
        data = _readline(fh)
    # now parse each event file channel header + data
    stream = Stream()
    # bytes per sample (arch is assumed to be in bits -- see _getVersion)
    dlen = arch // 8
    dtype = np.dtype(native_str(byteorder + 'i' + str(dlen)))
    stype = native_str('=i' + str(dlen))
    for _i in range(number_of_channels):
        # get channel header (fixed byte offsets within the 1040-byte line)
        temp = _readline(fh, 1040).decode()
        # create Stats
        header = Stats()
        header['network'] = (temp[16] + temp[19]).strip()
        header['station'] = temp[0:5].strip()
        header['location'] = (temp[7] + temp[12]).strip()
        header['channel'] = (temp[5:7] + temp[8]).strip()
        header['sampling_rate'] = float(temp[36:43])
        header['npts'] = int(temp[43:50])
        # create start and end times
        year = int(temp[9:12]) + 1900
        month = int(temp[17:19])
        day = int(temp[20:22])
        hour = int(temp[23:25])
        mins = int(temp[26:28])
        secs = float(temp[29:35])
        header['starttime'] = UTCDateTime(year, month, day, hour, mins) + secs
        if headonly:
            # skip data (npts samples plus the two extra leading words)
            fh.seek(dlen * (header['npts'] + 2), 1)
            stream.append(Trace(header=header))
        else:
            # fetch data
            data = frombuffer(fh.read((header['npts'] + 2) * dtype.itemsize),
                              dtype=dtype)
            # convert to system byte order
            data = np.require(data, stype)
            # NOTE(review): the first two words look like record markers
            # and are dropped -- confirm against the SEISAN format spec
            stream.append(Trace(data=data[2:], header=header))
    fh.close()
    return stream
def readSEISAN(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a SEISAN file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEISAN file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, only the channel headers are parsed
        and the waveform data is skipped.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/2001-01-13-1742-24S.KONO__004")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    4 Trace(s) in Stream:
    .KONO.0.B0Z | 2001-01-13T17:45:01.999000Z - ... | 20.0 Hz, 6000 samples
    .KONO.0.L0Z | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0N | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0E | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    """
    def _readline(fh, length=80):
        # each logical line is wrapped in 4-byte markers on both sides
        # (Fortran-style records, presumably) -- return only the payload
        data = fh.read(length + 8)
        end = length + 4
        start = 4
        return data[start:end]
    # read data chunk from given file
    fh = open(filename, 'rb')
    data = fh.read(80 * 12)
    # get version info from file (byte order, architecture word size, ...)
    (byteorder, arch, _version) = _getVersion(data)
    # fetch lines
    fh.seek(0)
    # start with event file header
    # line 1
    data = _readline(fh)
    number_of_channels = int(data[30:33])
    # calculate number of lines with channels (3 channels per line,
    # rounded up, but never fewer than 10 lines)
    number_of_lines = number_of_channels // 3 + (number_of_channels % 3 and 1)
    if number_of_lines < 10:
        number_of_lines = 10
    # line 2
    data = _readline(fh)
    # line 3
    for _i in range(0, number_of_lines):
        data = _readline(fh)
    # now parse each event file channel header + data
    stream = Stream()
    # bytes per sample (arch is assumed to be in bits -- see _getVersion)
    dlen = arch // 8
    dtype = np.dtype(native_str(byteorder + 'i' + str(dlen)))
    stype = native_str('=i' + str(dlen))
    for _i in range(number_of_channels):
        # get channel header (fixed byte offsets within the 1040-byte line)
        temp = _readline(fh, 1040).decode()
        # create Stats
        header = Stats()
        header['network'] = (temp[16] + temp[19]).strip()
        header['station'] = temp[0:5].strip()
        header['location'] = (temp[7] + temp[12]).strip()
        header['channel'] = (temp[5:7] + temp[8]).strip()
        header['sampling_rate'] = float(temp[36:43])
        header['npts'] = int(temp[43:50])
        # create start and end times
        year = int(temp[9:12]) + 1900
        month = int(temp[17:19])
        day = int(temp[20:22])
        hour = int(temp[23:25])
        mins = int(temp[26:28])
        secs = float(temp[29:35])
        header['starttime'] = UTCDateTime(year, month, day, hour, mins) + secs
        if headonly:
            # skip data (npts samples plus the two extra leading words)
            fh.seek(dlen * (header['npts'] + 2), 1)
            stream.append(Trace(header=header))
        else:
            # fetch data
            data = frombuffer(
                fh.read((header['npts'] + 2) * dtype.itemsize),
                dtype=dtype)
            # convert to system byte order
            data = np.require(data, stype)
            # NOTE(review): the first two words look like record markers
            # and are dropped -- confirm against the SEISAN format spec
            stream.append(Trace(data=data[2:], header=header))
    fh.close()
    return stream
def readY(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a Nanometrics Y file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: Nanometrics Y file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/YAYT_BHZ_20021223.124800")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    .AYT..BHZ | 2002-12-23T12:48:00.000100Z - ... | 100.0 Hz, 18000 samples
    """
    # NOTE(review): ``headonly`` is accepted but never referenced below --
    # the waveform data is always read; confirm whether this is intended.
    #
    # The first tag in a Y-file must be the TAG_Y_FILE (0) tag. This must be
    # followed by the following tags, in any order:
    #   TAG_STATION_INFO (1)
    #   TAG_STATION_LOCATION (2)
    #   TAG_STATION_PARAMETERS (3)
    #   TAG_STATION_DATABASE (4)
    #   TAG_SERIES_INFO (5)
    #   TAG_SERIES_DATABASE (6)
    # The following tag is optional:
    #   TAG_STATION_RESPONSE (26)
    # The last tag in the file must be a TAG_DATA_INT32 (7) tag. This tag must
    # be followed by an array of LONG's. The number of entries in the array
    # must agree with what was described in the TAG_SERIES_INFO data.
    with open(filename, 'rb') as fh:
        trace = Trace()
        trace.stats.y = AttribDict()
        # sample count; filled in by TAG_SERIES_INFO before TAG_DATA_INT32.
        # NOTE(review): if TAG_SERIES_INFO is missing, count stays -1 and
        # fh.read() below receives a negative size, i.e. reads to EOF.
        count = -1
        while True:
            endian, tag_type, next_tag, _next_same = __parseTag(fh)
            if tag_type == 1:
                # TAG_STATION_INFO
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # UCHAR Station[5] (BLANKPAD)
                #   Station is the five letter SEED format station
                #   identification.
                # UCHAR Location[2] (BLANKPAD)
                #   Location Location is the two letter SEED format location
                #   identification.
                # UCHAR Channel[3] (BLANKPAD)
                #   Channel Channel is the three letter SEED format channel
                #   identification.
                # UCHAR NetworkID[51] (ASCIIZ)
                #   This is some descriptive text identifying the network.
                # UCHAR SiteName[61] (ASCIIZ)
                #   SiteName is some text identifying the site.
                # UCHAR Comment[31] (ASCIIZ)
                #   Comment is any comment for this station.
                # UCHAR SensorType[51] (ASCIIZ)
                #   SensorType is some text describing the type of sensor
                #   used at the station.
                # UCHAR DataFormat[7] (ASCIIZ)
                #   DataFormat is some text describing the data format
                #   recorded at the station.
                data = fh.read(next_tag)
                # skip the 8-byte Update field, then split the fixed-width
                # character fields
                parts = [
                    p.decode()
                    for p in unpack(b'5s2s3s51s61s31s51s7s', data[8:])
                ]
                trace.stats.station = parts[0].strip()
                trace.stats.location = parts[1].strip()
                trace.stats.channel = parts[2].strip()
                # extra
                params = AttribDict()
                params.network_id = parts[3].rstrip('\x00')
                params.side_name = parts[4].rstrip('\x00')
                params.comment = parts[5].rstrip('\x00')
                params.sensor_type = parts[6].rstrip('\x00')
                params.data_format = parts[7].rstrip('\x00')
                trace.stats.y.tag_station_info = params
            elif tag_type == 2:
                # TAG_STATION_LOCATION
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # FLOAT Latitude
                #   Latitude in degrees of the location of the station. The
                #   latitude should be between -90 (South) and +90 (North).
                # FLOAT Longitude
                #   Longitude in degrees of the location of the station. The
                #   longitude should be between -180 (West) and +180 (East).
                # FLOAT Elevation
                #   Elevation in meters above sea level of the station.
                # FLOAT Depth
                #   Depth is the depth in meters of the sensor.
                # FLOAT Azimuth
                #   Azimuth of the sensor in degrees clockwise.
                # FLOAT Dip
                #   Dip is the dip of the sensor. 90 degrees is defined as
                #   vertical right way up.
                data = fh.read(next_tag)
                parts = unpack(endian + b'ffffff', data[8:])
                params = AttribDict()
                params.latitude = parts[0]
                params.longitude = parts[1]
                params.elevation = parts[2]
                params.depth = parts[3]
                params.azimuth = parts[4]
                params.dip = parts[5]
                trace.stats.y.tag_station_location = params
            elif tag_type == 3:
                # TAG_STATION_PARAMETERS
                # UCHAR Update[16]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME StartValidTime
                #   Time that the information in these records became valid.
                # REALTIME EndValidTime
                #   Time that the information in these records became
                #   invalid.
                # FLOAT Sensitivity
                #   Sensitivity of the sensor in nanometers per bit.
                # FLOAT SensFreq
                #   Frequency at which the sensitivity was measured.
                # FLOAT SampleRate
                #   This is the number of samples per second. This value can
                #   be less than 1.0. (i.e. 0.1)
                # FLOAT MaxClkDrift
                #   Maximum drift rate of the clock in seconds per sample.
                # UCHAR SensUnits[24] (ASCIIZ)
                #   Some text indicating the units in which the sensitivity
                #   was measured.
                # UCHAR CalibUnits[24] (ASCIIZ)
                #   Some text indicating the units in which calibration
                #   input was measured.
                # UCHAR ChanFlags[27] (BLANKPAD)
                #   Text indicating the channel flags according to the SEED
                #   definition.
                # UCHAR UpdateFlag
                #   This flag must be “N” or “U” according to the SEED
                #   definition.
                # UCHAR Filler[4]
                #   Filler Pads out the record to satisfy the alignment
                #   restrictions for reading data on a SPARC processor.
                data = fh.read(next_tag)
                parts = unpack(endian + b'ddffff24s24s27sc4s', data[16:])
                trace.stats.sampling_rate = parts[4]
                # extra
                params = AttribDict()
                params.start_valid_time = parts[0]
                params.end_valid_time = parts[1]
                params.sensitivity = parts[2]
                params.sens_freq = parts[3]
                params.sample_rate = parts[4]
                params.max_clk_drift = parts[5]
                params.sens_units = parts[6].rstrip(b'\x00').decode()
                params.calib_units = parts[7].rstrip(b'\x00').decode()
                # NOTE(review): chan_flags is left as bytes (no .decode()),
                # unlike the two fields above -- confirm downstream usage
                params.chan_flags = parts[8].strip()
                params.update_flag = parts[9]
                trace.stats.y.tag_station_parameters = params
            elif tag_type == 4:
                # TAG_STATION_DATABASE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME LoadDate
                #   Date the information was loaded into the database.
                # UCHAR Key[16]
                #   Unique key that identifies this record in the database.
                data = fh.read(next_tag)
                parts = unpack(endian + b'd16s', data[8:])
                params = AttribDict()
                params.load_date = parts[0]
                # NOTE(review): key stays as bytes here, whereas the
                # TAG_SERIES_DATABASE branch decodes it -- confirm intent
                params.key = parts[1].rstrip(b'\x00')
                trace.stats.y.tag_station_database = params
            elif tag_type == 5:
                # TAG_SERIES_INFO
                # UCHAR Update[16]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME StartTime
                #   This is start time of the data in this series.
                # REALTIME EndTime
                #   This is end time of the data in this series.
                # ULONG NumSamples
                #   This is the number of samples of data in this series.
                # LONG DCOffset
                #   DCOffset is the DC offset of the data.
                # LONG MaxAmplitude
                #   MaxAmplitude is the maximum amplitude of the data.
                # LONG MinAmplitude
                #   MinAmplitude is the minimum amplitude of the data.
                # UCHAR Format[8] (ASCIIZ)
                #   This is the format of the data. This should always be
                #   “YFILE”.
                # UCHAR FormatVersion[8] (ASCIIZ)
                #   FormatVersion is the version of the format of the data.
                #   This should always be “5.0”
                data = fh.read(next_tag)
                parts = unpack(endian + b'ddLlll8s8s', data[16:])
                trace.stats.starttime = UTCDateTime(parts[0])
                # remember the sample count for the TAG_DATA_INT32 read
                count = parts[2]
                # extra
                params = AttribDict()
                params.endtime = UTCDateTime(parts[1])
                params.num_samples = parts[2]
                params.dc_offset = parts[3]
                params.max_amplitude = parts[4]
                params.min_amplitude = parts[5]
                params.format = parts[6].rstrip(b'\x00').decode()
                params.format_version = parts[7].rstrip(b'\x00').decode()
                trace.stats.y.tag_series_info = params
            elif tag_type == 6:
                # TAG_SERIES_DATABASE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME LoadDate
                #   Date the information was loaded into the database.
                # UCHAR Key[16]
                #   Unique key that identifies this record in the database.
                data = fh.read(next_tag)
                parts = unpack(endian + b'd16s', data[8:])
                params = AttribDict()
                params.load_date = parts[0]
                params.key = parts[1].rstrip(b'\x00').decode()
                trace.stats.y.tag_series_database = params
            elif tag_type == 26:
                # TAG_STATION_RESPONSE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # UCHAR PathName[260]
                #   PathName is the full name of the file which contains the
                #   response information for this station.
                data = fh.read(next_tag)
                parts = unpack(b'260s', data[8:])
                params = AttribDict()
                params.path_name = parts[0].rstrip(b'\x00').decode()
                trace.stats.y.tag_station_response = params
            elif tag_type == 7:
                # TAG_DATA_INT32
                trace.data = frombuffer(fh.read(
                    np.dtype(np.int32).itemsize * count), dtype=np.int32)
                # break loop as TAG_DATA_INT32 should be the last tag in file
                break
            else:
                # unknown tag: skip its payload and continue
                fh.seek(next_tag, 1)
    return Stream([trace])
def readY(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a Nanometrics Y file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: Nanometrics Y file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: A ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/YAYT_BHZ_20021223.124800")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    .AYT..BHZ | 2002-12-23T12:48:00.000100Z - ... | 100.0 Hz, 18000 samples
    """
    # NOTE(review): ``headonly`` is accepted but never referenced below --
    # the waveform data is always read; confirm whether this is intended.
    #
    # The first tag in a Y-file must be the TAG_Y_FILE (0) tag. This must be
    # followed by the following tags, in any order:
    #   TAG_STATION_INFO (1)
    #   TAG_STATION_LOCATION (2)
    #   TAG_STATION_PARAMETERS (3)
    #   TAG_STATION_DATABASE (4)
    #   TAG_SERIES_INFO (5)
    #   TAG_SERIES_DATABASE (6)
    # The following tag is optional:
    #   TAG_STATION_RESPONSE (26)
    # The last tag in the file must be a TAG_DATA_INT32 (7) tag. This tag must
    # be followed by an array of LONG's. The number of entries in the array
    # must agree with what was described in the TAG_SERIES_INFO data.
    with open(filename, 'rb') as fh:
        trace = Trace()
        trace.stats.y = AttribDict()
        # sample count; filled in by TAG_SERIES_INFO before TAG_DATA_INT32.
        # NOTE(review): if TAG_SERIES_INFO is missing, count stays -1 and
        # fh.read() below receives a negative size, i.e. reads to EOF.
        count = -1
        while True:
            endian, tag_type, next_tag, _next_same = __parseTag(fh)
            if tag_type == 1:
                # TAG_STATION_INFO
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # UCHAR Station[5] (BLANKPAD)
                #   Station is the five letter SEED format station
                #   identification.
                # UCHAR Location[2] (BLANKPAD)
                #   Location Location is the two letter SEED format location
                #   identification.
                # UCHAR Channel[3] (BLANKPAD)
                #   Channel Channel is the three letter SEED format channel
                #   identification.
                # UCHAR NetworkID[51] (ASCIIZ)
                #   This is some descriptive text identifying the network.
                # UCHAR SiteName[61] (ASCIIZ)
                #   SiteName is some text identifying the site.
                # UCHAR Comment[31] (ASCIIZ)
                #   Comment is any comment for this station.
                # UCHAR SensorType[51] (ASCIIZ)
                #   SensorType is some text describing the type of sensor
                #   used at the station.
                # UCHAR DataFormat[7] (ASCIIZ)
                #   DataFormat is some text describing the data format
                #   recorded at the station.
                data = fh.read(next_tag)
                # skip the 8-byte Update field, then split the fixed-width
                # character fields
                parts = [p.decode() for p in
                         unpack(b'5s2s3s51s61s31s51s7s', data[8:])]
                trace.stats.station = parts[0].strip()
                trace.stats.location = parts[1].strip()
                trace.stats.channel = parts[2].strip()
                # extra
                params = AttribDict()
                params.network_id = parts[3].rstrip('\x00')
                params.side_name = parts[4].rstrip('\x00')
                params.comment = parts[5].rstrip('\x00')
                params.sensor_type = parts[6].rstrip('\x00')
                params.data_format = parts[7].rstrip('\x00')
                trace.stats.y.tag_station_info = params
            elif tag_type == 2:
                # TAG_STATION_LOCATION
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # FLOAT Latitude
                #   Latitude in degrees of the location of the station. The
                #   latitude should be between -90 (South) and +90 (North).
                # FLOAT Longitude
                #   Longitude in degrees of the location of the station. The
                #   longitude should be between -180 (West) and +180 (East).
                # FLOAT Elevation
                #   Elevation in meters above sea level of the station.
                # FLOAT Depth
                #   Depth is the depth in meters of the sensor.
                # FLOAT Azimuth
                #   Azimuth of the sensor in degrees clockwise.
                # FLOAT Dip
                #   Dip is the dip of the sensor. 90 degrees is defined as
                #   vertical right way up.
                data = fh.read(next_tag)
                parts = unpack(endian + b'ffffff', data[8:])
                params = AttribDict()
                params.latitude = parts[0]
                params.longitude = parts[1]
                params.elevation = parts[2]
                params.depth = parts[3]
                params.azimuth = parts[4]
                params.dip = parts[5]
                trace.stats.y.tag_station_location = params
            elif tag_type == 3:
                # TAG_STATION_PARAMETERS
                # UCHAR Update[16]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME StartValidTime
                #   Time that the information in these records became valid.
                # REALTIME EndValidTime
                #   Time that the information in these records became
                #   invalid.
                # FLOAT Sensitivity
                #   Sensitivity of the sensor in nanometers per bit.
                # FLOAT SensFreq
                #   Frequency at which the sensitivity was measured.
                # FLOAT SampleRate
                #   This is the number of samples per second. This value can
                #   be less than 1.0. (i.e. 0.1)
                # FLOAT MaxClkDrift
                #   Maximum drift rate of the clock in seconds per sample.
                # UCHAR SensUnits[24] (ASCIIZ)
                #   Some text indicating the units in which the sensitivity
                #   was measured.
                # UCHAR CalibUnits[24] (ASCIIZ)
                #   Some text indicating the units in which calibration
                #   input was measured.
                # UCHAR ChanFlags[27] (BLANKPAD)
                #   Text indicating the channel flags according to the SEED
                #   definition.
                # UCHAR UpdateFlag
                #   This flag must be “N” or “U” according to the SEED
                #   definition.
                # UCHAR Filler[4]
                #   Filler Pads out the record to satisfy the alignment
                #   restrictions for reading data on a SPARC processor.
                data = fh.read(next_tag)
                parts = unpack(endian + b'ddffff24s24s27sc4s', data[16:])
                trace.stats.sampling_rate = parts[4]
                # extra
                params = AttribDict()
                params.start_valid_time = parts[0]
                params.end_valid_time = parts[1]
                params.sensitivity = parts[2]
                params.sens_freq = parts[3]
                params.sample_rate = parts[4]
                params.max_clk_drift = parts[5]
                params.sens_units = parts[6].rstrip(b'\x00').decode()
                params.calib_units = parts[7].rstrip(b'\x00').decode()
                # NOTE(review): chan_flags is left as bytes (no .decode()),
                # unlike the two fields above -- confirm downstream usage
                params.chan_flags = parts[8].strip()
                params.update_flag = parts[9]
                trace.stats.y.tag_station_parameters = params
            elif tag_type == 4:
                # TAG_STATION_DATABASE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME LoadDate
                #   Date the information was loaded into the database.
                # UCHAR Key[16]
                #   Unique key that identifies this record in the database.
                data = fh.read(next_tag)
                parts = unpack(endian + b'd16s', data[8:])
                params = AttribDict()
                params.load_date = parts[0]
                # NOTE(review): key stays as bytes here, whereas the
                # TAG_SERIES_DATABASE branch decodes it -- confirm intent
                params.key = parts[1].rstrip(b'\x00')
                trace.stats.y.tag_station_database = params
            elif tag_type == 5:
                # TAG_SERIES_INFO
                # UCHAR Update[16]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME StartTime
                #   This is start time of the data in this series.
                # REALTIME EndTime
                #   This is end time of the data in this series.
                # ULONG NumSamples
                #   This is the number of samples of data in this series.
                # LONG DCOffset
                #   DCOffset is the DC offset of the data.
                # LONG MaxAmplitude
                #   MaxAmplitude is the maximum amplitude of the data.
                # LONG MinAmplitude
                #   MinAmplitude is the minimum amplitude of the data.
                # UCHAR Format[8] (ASCIIZ)
                #   This is the format of the data. This should always be
                #   “YFILE”.
                # UCHAR FormatVersion[8] (ASCIIZ)
                #   FormatVersion is the version of the format of the data.
                #   This should always be “5.0”
                data = fh.read(next_tag)
                parts = unpack(endian + b'ddLlll8s8s', data[16:])
                trace.stats.starttime = UTCDateTime(parts[0])
                # remember the sample count for the TAG_DATA_INT32 read
                count = parts[2]
                # extra
                params = AttribDict()
                params.endtime = UTCDateTime(parts[1])
                params.num_samples = parts[2]
                params.dc_offset = parts[3]
                params.max_amplitude = parts[4]
                params.min_amplitude = parts[5]
                params.format = parts[6].rstrip(b'\x00').decode()
                params.format_version = parts[7].rstrip(b'\x00').decode()
                trace.stats.y.tag_series_info = params
            elif tag_type == 6:
                # TAG_SERIES_DATABASE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # REALTIME LoadDate
                #   Date the information was loaded into the database.
                # UCHAR Key[16]
                #   Unique key that identifies this record in the database.
                data = fh.read(next_tag)
                parts = unpack(endian + b'd16s', data[8:])
                params = AttribDict()
                params.load_date = parts[0]
                params.key = parts[1].rstrip(b'\x00').decode()
                trace.stats.y.tag_series_database = params
            elif tag_type == 26:
                # TAG_STATION_RESPONSE
                # UCHAR Update[8]
                #   This field is only used internally for administrative
                #   purposes. It should always be set to zeroes.
                # UCHAR PathName[260]
                #   PathName is the full name of the file which contains the
                #   response information for this station.
                data = fh.read(next_tag)
                parts = unpack(b'260s', data[8:])
                params = AttribDict()
                params.path_name = parts[0].rstrip(b'\x00').decode()
                trace.stats.y.tag_station_response = params
            elif tag_type == 7:
                # TAG_DATA_INT32
                trace.data = frombuffer(
                    fh.read(np.dtype(np.int32).itemsize * count),
                    dtype=np.int32)
                # break loop as TAG_DATA_INT32 should be the last tag in file
                break
            else:
                # unknown tag: skip its payload and continue
                fh.seek(next_tag, 1)
    return Stream([trace])