class Blockette012(Blockette):
    """
    Blockette 012: Volume Time Span Index Blockette.

    This blockette forms an index to the time spans that encompass the actual
    data. One index entry exists for each time span recorded later in the
    volume. Time spans are not used for field station type volumes. There
    should be one entry in this index for each time span control header. (For
    more information, see the notes for blockettes [70], [73], and [74].)

    Sample:
    012006300011992,001,00:00:00.0000~1992,002,00:00:00.0000~000014
    """

    id = 12
    name = "Volume Timespan Index"
    fields = [
        Integer(3, "Number of spans in table", 4),
        # REPEAT fields 4 — 6 for the Number of spans in table:
        # The whole loop is flagged optional because the index may be empty.
        Loop("Timespan", "Number of spans in table", [
            VariableString(4, "Beginning of span", 1, 22, 'T'),
            VariableString(5, "End of span", 1, 22, 'T'),
            Integer(6, "Sequence number of time span header", 6,
                    ignore=True)
        ], optional=True),
    ]
class Blockette010(Blockette):
    """
    Blockette 010: Volume Identifier Blockette.

    This is the normal header blockette for station or event oriented network
    volumes. Include it once at the beginning of each logical volume or sub-
    volume.

    Sample:
    010009502.1121992,001,00:00:00.0000~1992,002,00:00:00.0000~1993,029~
    IRIS _ DMC~Data for 1992,001~
    """

    id = 10
    name = "Volume Identifier"
    fields = [
        # Fields 3 and 4 are declared twice, once per supported XSEED
        # version; the 1.1 variant carries ignore=True (handled by the
        # field framework -- presumably skipped for that version; confirm
        # against the fields module).
        Float(3, "Version of format", 4, mask='%2.1f', default_value=2.4,
              xseed_version='1.0'),
        Float(3, "Version of format", 4, mask='%2.1f', default_value=2.4,
              ignore=True, xseed_version='1.1'),
        Integer(4, "Logical record length", 2, default_value=12,
                xseed_version='1.0'),
        Integer(4, "Logical record length", 2, default_value=12, ignore=True,
                xseed_version='1.1'),
        VariableString(5, "Beginning time", 1, 22, 'T'),
        VariableString(6, "End time", 1, 22, 'T',
                       default_value=UTCDateTime(2038, 1, 1)),
        # Fields 7-9 only exist from SEED version 2.3 on.
        VariableString(7, "Volume Time", 1, 22, 'T', version=2.3),
        VariableString(8, "Originating Organization", 1, 80, version=2.3),
        VariableString(9, "Label", 1, 80, version=2.3)
    ]
class Blockette030(Blockette):
    """
    Blockette 030: Data Format Dictionary Blockette.

    All volumes, with the exception of miniSEED data records, must have a
    Data Format Dictionary Blockette [30]. Each Channel Identifier Blockette
    [52] has a reference (field 16) back to a Data Format Dictionary
    Blockette [30], so that SEED reading programs will know how to decode
    data for the channels. Because every kind of data format requires an
    entry in the Data Format Dictionary Blockette [30], each recording
    network needs to list entries for each data format, if a heterogeneous
    mix of data formats are included in a volume. This data format
    dictionary is used to decompress the data correctly.

    Sample:
    0300086CDSN Gain-Ranged Format~000200104M0~W2 D0-13 A-8191~D1415~
    P0:#0,1:#2,2:#4,3:#7~
    """

    id = 30
    name = "Data Format Dictionary"
    fields = [
        VariableString(3, "Short descriptive name", 1, 50, 'UNLPS'),
        Integer(4, "Data format identifier code", 4),
        Integer(5, "Data family type", 3),
        Integer(6, "Number of decoder keys", 2),
        # REPEAT field 7 for the Number of decoder keys:
        Loop("Decoder keys", "Number of decoder keys",
             [VariableString(7, "Decoder keys", flags='UNLPS')],
             omit_tag=True),
    ]
class Blockette050(Blockette):
    """
    Blockette 050: Station Identifier Blockette.

    Sample:
    0500097ANMO +34.946200-106.456700+1740.00006001Albuquerque,
    NewMexico, USA~0013210101989,241~~NIU
    """

    id = 50
    name = "Station Identifier"
    fields = [
        FixedString(3, "Station call letters", 5, 'UN'),
        Float(4, "Latitude", 10, mask='%+02.6f'),
        Float(5, "Longitude", 11, mask='%+03.6f'),
        Float(6, "Elevation", 7, mask='%+04.1f'),
        Integer(7, "Number of channels", 4),
        Integer(8, "Number of station comments", 3),
        VariableString(9, "Site name", 1, 60, 'UNLPS'),
        # xpath references the abbreviation blockette resolving this code.
        Integer(10, "Network identifier code", 3, xpath=33),
        Integer(11, "word order 32bit", 4),
        Integer(12, "word order 16bit", 2),
        VariableString(13, "Start effective date", 1, 22, 'T'),
        # Field 14 is declared per XSEED version; only the 1.0 variant is
        # optional.
        VariableString(14, "End effective date", 0, 22, 'T',
                       optional=True, xseed_version='1.0'),
        VariableString(14, "End effective date", 0, 22, 'T',
                       xseed_version='1.1'),
        FixedString(15, "Update flag", 1),
        # Field 16 only exists from SEED version 2.3 on.
        FixedString(16, "Network Code", 2, 'ULN', version=2.3)
    ]
class Blockette052(Blockette):
    """
    Blockette 052: Channel Identifier Blockette.

    Sample:
    0520119  BHE0000004~001002+34.946200-106.456700+1740.0100.0090.0+00.0000112
    2.000E+01 2.000E-030000CG~1991,042,20:48~~N
    """

    id = 52
    name = "Channel Identifier"
    fields = [
        FixedString(3, "Location identifier", 2, 'UN'),
        FixedString(4, "Channel identifier", 3, 'UN'),
        Integer(5, "Subchannel identifier", 4),
        # xpath values reference the abbreviation blockettes resolving the
        # lookup codes.
        Integer(6, "Instrument identifier", 3, xpath=33),
        VariableString(7, "Optional comment", 0, 30, 'UNLPS'),
        Integer(8, "Units of signal response", 3, xpath=34),
        Integer(9, "Units of calibration input", 3, xpath=34),
        Float(10, "Latitude", 10, mask='%+2.6f'),
        Float(11, "Longitude", 11, mask='%+3.6f'),
        Float(12, "Elevation", 7, mask='%+4.1f'),
        Float(13, "Local depth", 5, mask='%3.1f'),
        Float(14, "Azimuth", 5, mask='%3.1f'),
        Float(15, "Dip", 5, mask='%+2.1f'),
        Integer(16, "Data format identifier code", 4, xpath=30),
        # The typo is intentional for XSEED 1.0 compatibility.
        Integer(17, "Data record length", 2, xseed_version='1.0',
                xml_tag="data_recored_length"),
        Integer(17, "Data record length", 2, xseed_version='1.1'),
        Float(18, "Sample rate", 10, mask='%1.4e'),
        Float(19, "Max clock drift", 10, mask='%1.4e'),
        Integer(20, "Number of comments", 4),
        VariableString(21, "Channel flags", 0, 26, 'U'),
        VariableString(22, "Start date", 1, 22, 'T'),
        # Field 23 is declared per XSEED version; only the 1.0 variant is
        # optional.
        VariableString(23, "End date", 0, 22, 'T', optional=True,
                       xseed_version='1.0'),
        VariableString(23, "End date", 0, 22, 'T', xseed_version='1.1'),
        FixedString(24, "Update flag", 1)
    ]
class Blockette051(Blockette):
    """
    Blockette 051: Station Comment Blockette.

    Sample:
    05100351992,001~1992,002~0740000000
    """

    id = 51
    name = "Station Comment"
    fields = [
        VariableString(3, "Beginning effective time", 1, 22, 'T'),
        VariableString(4, "End effective time", 1, 22, 'T', optional=True),
        # The comment code key is resolved via blockette [31].
        Integer(5, "Comment code key", 4, xpath=31),
        Integer(6, "Comment level", 6, ignore=True)
    ]
class Blockette059(Blockette):
    """
    Blockette 059: Channel Comment Blockette.

    Sample:
    05900351989,001~1989,004~4410000000
    """

    id = 59
    name = "Channel Comment"
    fields = [
        VariableString(3, "Beginning of effective time", 1, 22, 'T'),
        VariableString(4, "End effective time", 0, 22, 'T', optional=True),
        # The comment code key is resolved via blockette [31].
        Integer(5, "Comment code key", 4, xpath=31),
        Integer(6, "Comment level", 6, ignore=True)
    ]
class Blockette032(Blockette):
    """
    Blockette 032: Cited Source Dictionary Blockette.

    This blockette identifies the contributing institution that provides the
    hypocenter and magnitude information. This blockette is used in event
    oriented network volumes.
    """

    id = 32
    name = "Cited Source Dictionary"
    fields = [
        Integer(3, "Source lookup code", 2),
        VariableString(4, "Name of publication author", 1, 70, 'UNLPS'),
        VariableString(5, "Date published catalog", 1, 70, 'UNLPS'),
        VariableString(6, "Publisher name", 1, 70, 'UNLPS'),
    ]
class Blockette034(Blockette):
    """
    Blockette 034: Units Abbreviations Blockette.

    This blockette defines the units of measurement in a standard,
    repeatable way. Mention each unit of measurement only once.

    Sample:
    0340044001M/S~Velocity in Meters Per Second~
    """

    id = 34
    name = "Units Abbreviations"
    fields = [
        Integer(3, "Unit lookup code", 3),
        VariableString(4, "Unit name", 1, 20, 'UNP'),
        VariableString(5, "Unit description", 0, 50, 'UNLPS')
    ]
class Blockette061(Blockette):
    """
    Blockette 061: FIR Response Blockette.

    The FIR blockette is used to specify FIR (Finite Impulse Response)
    digital filter coefficients. It is an alternative to blockette [54] when
    specifying FIR filters. The blockette recognizes the various forms of
    filter symmetry and can exploit them to reduce the number of factors
    specified to the blockette. In July 2007, the FDSN adopted a convention
    that requires the coefficients to be listed in forward time order. As a
    reference, minimum-phase filters (which are asymmetric) should be
    written with the largest values near the beginning of the coefficient
    list.
    """

    id = 61
    name = "FIR Response"
    fields = [
        Integer(3, "Stage sequence number", 2),
        VariableString(4, "Response Name", 1, 25, 'UN_'),
        FixedString(5, "Symmetry Code", 1, 'U'),
        Integer(6, "Signal In Units", 3, xpath=34),
        Integer(7, "Signal Out Units", 3, xpath=34),
        Integer(8, "Number of Coefficients", 4),
        # REPEAT field 9 for the Number of Coefficients
        Loop("FIR Coefficient", "Number of Coefficients",
             [Float(9, "FIR Coefficient", 14, mask='%+1.7e')], flat=True),
    ]

    def getRESP(self, station, channel, abbreviations):
        """
        Returns RESP string.

        :param station: station code used in the RESP banner
        :param channel: channel code used in the RESP banner
        :param abbreviations: abbreviation blockettes used to resolve the
            unit lookup codes via ``LookupCode``
        """
        # Fill the module-level RESP template with header information.
        out = RESP % (station, channel,
                      self.stage_sequence_number,
                      self.symmetry_code,
                      LookupCode(abbreviations, 34, 'unit_name',
                                 'unit_lookup_code', self.signal_in_units),
                      LookupCode(abbreviations, 34, 'unit_description',
                                 'unit_lookup_code', self.signal_in_units),
                      LookupCode(abbreviations, 34, 'unit_name',
                                 'unit_lookup_code', self.signal_out_units),
                      LookupCode(abbreviations, 34, 'unit_description',
                                 'unit_lookup_code', self.signal_out_units),
                      self.number_of_coefficients)
        if self.number_of_coefficients > 1:
            out += '#\t\tNumerator coefficients:\n'
            out += '#\t\t i, coefficient\n'
            for _i in range(self.number_of_coefficients):
                out += 'B061F09 %4s %13s\n' % \
                    (_i, formatRESP(self.FIR_coefficient[_i], 6))
        elif self.number_of_coefficients == 1:
            # A single repeat collapses to a scalar attribute.
            out += '#\t\tNumerator coefficients:\n'
            out += '#\t\t i, coefficient\n'
            out += 'B061F09 %4s %13s\n' % \
                (0, formatRESP(self.FIR_coefficient, 6))
        out += '#\t\t\n'
        # Bug/consistency fix: every other blockette's getRESP() returns the
        # plain string. The former ``out.encode()`` was a no-op for ASCII
        # content but raised UnicodeEncodeError for any non-ASCII response
        # name, so it is dropped.
        return out
class Blockette033(Blockette):
    """
    Blockette 033: Generic Abbreviation Blockette.

    Sample:
    0330055001(GSN) Global Seismograph Network (IRIS/USGS)~
    """

    id = 33
    name = "Generic Abbreviation"
    fields = [
        Integer(3, "Abbreviation lookup code", 3),
        VariableString(4, "Abbreviation description", 1, 50, 'UNLPS')
    ]
class Blockette047(Blockette):
    """
    Blockette 047: Decimation Dictionary Blockette.

    See Decimation Blockette [57] for more information.
    """

    id = 47
    name = "Decimation Dictionary"
    fields = [
        Integer(3, "Response Lookup Key", 4),
        VariableString(4, "Response Name", 1, 25, 'UN_'),
        Float(5, "Input sample rate", 10, mask='%1.4e'),
        # The misspelled XML tag is kept on purpose for XSEED 1.0
        # compatibility (same pattern as blockette [52]'s field 17).
        Integer(6, "Decimation factor", 5, xseed_version='1.0',
                xml_tag="decimiation_factor"),
        Integer(6, "Decimation factor", 5, xseed_version='1.1'),
        Integer(7, "Decimation offset", 5),
        Float(8, "Estimated delay", 11, mask='%+1.4e'),
        Float(9, "Correction applied", 11, mask='%+1.4e')
    ]

    def getRESP(self, station, channel, abbreviations):
        """
        Returns RESP string.
        """
        string = \
            '#\t\t+ +------------------------------+' + \
            ' +\n' + \
            '#\t\t+ | Decimation,' + \
            '%6s ch %s | +\n' % (station, channel) + \
            '#\t\t+ +------------------------------+' + \
            ' +\n' + \
            '#\t\t\n' + \
            'B047F05 Response input sample rate: %s\n' \
            % formatRESP(self.input_sample_rate, 6) + \
            'B047F06 Response decimation factor: %s\n' \
            % self.decimation_factor + \
            'B047F07 Response decimation offset: %s\n' \
            % self.decimation_offset + \
            'B047F08 Response delay: %s\n' \
            % formatRESP(self.estimated_delay, 6) + \
            'B047F09 Response correction: %s\n' \
            % formatRESP(self.correction_applied, 6) + \
            '#\t\t\n'
        return string
class Blockette031(Blockette):
    """
    Blockette 031: Comment Description Blockette.

    Station operators, data collection centers, and data management centers
    can add descriptive comments to data to indicate problems encountered or
    special situations.

    Sample:
    03100720750Stime correction does not include leap second,(-1000ms).~000
    """

    id = 31
    name = "Comment Description"
    fields = [
        Integer(3, "Comment code key", 4),
        FixedString(4, "Comment class code", 1),
        VariableString(5, "Description of comment", 1, 70, 'UNLPS'),
        Integer(6, "Units of comment level", 3, ignore=True)
    ]
class Blockette044(Blockette):
    """
    Blockette 044: Response (Coefficients) Dictionary Blockette.

    See Response (Coefficients) Dictionary Blockette [54] for more
    information.
    """

    id = 44
    name = "Response Coefficients Dictionary"
    fields = [
        Integer(3, "Response Lookup Key", 4),
        VariableString(4, "Response Name", 1, 25, 'UN_'),
        FixedString(5, "Response type", 1, 'U'),
        Integer(6, "Signal input units", 3, xpath=34),
        Integer(7, "Signal output units", 3, xpath=34),
        Integer(8, "Number of numerators", 4),
        # REPEAT fields 9 - 10 for the Number of numerators:
        Loop('Numerators', "Number of numerators", [
            Float(9, "Numerator coefficient", 12, mask='%+1.5e'),
            Float(10, "Numerator error", 12, mask='%+1.5e')
        ], flat=True),
        Integer(11, "Number of denominators", 4),
        # REPEAT fields 12 — 13 for the Number of denominators:
        Loop('Denominators', "Number of denominators", [
            Float(12, "Denominator coefficient", 12, mask='%+1.5e'),
            Float(13, "Denominator error", 12, mask='%+1.5e')
        ], flat=True)
    ]

    # Changes the name of the blockette because of an error in XSEED 1.0
    def getXML(self, *args, **kwargs):
        xml = Blockette.getXML(self, *args, **kwargs)
        if self.xseed_version == '1.0':
            xml.tag = 'response_coefficients'
        return xml

    def getRESP(self, station, channel, abbreviations):
        """
        Returns RESP string.

        :param station: station code used in the RESP banner
        :param channel: channel code used in the RESP banner
        :param abbreviations: abbreviation blockettes for unit lookups
        """
        string = \
            '#\t\t+ +----------------------------------------' +\
            '---+ +\n' + \
            '#\t\t+ | Response (Coefficients),' + \
            '%6s ch %s | +\n' % (station, channel) + \
            '#\t\t+ +----------------------------------------' +\
            '---+ +\n' + \
            '#\t\t\n' + \
            'B044F05 Response type: %s\n' \
            % self.response_type + \
            'B044F06 Response in units lookup: %s\n'\
            % Blockette34Lookup(abbreviations, self.signal_input_units) + \
            'B044F07 Response out units lookup: %s\n'\
            % Blockette34Lookup(abbreviations, self.signal_output_units) + \
            'B044F08 Number of numerators: %s\n' \
            % self.number_of_numerators + \
            'B044F11 Number of denominators: %s\n' \
            % self.number_of_denominators
        # Bug fix: the numerator table header used to be appended
        # unconditionally above AND again inside the if-branch below,
        # duplicating it whenever numerators exist and emitting a spurious
        # header when there are none. It is now written exactly once.
        if self.number_of_numerators:
            string += \
                '#\t\tNumerator coefficients:\n' + \
                '#\t\t i, coefficient, error\n'
            if self.number_of_numerators > 1:
                # Loop over all numerators.
                for _i in range(self.number_of_numerators):
                    string += 'B044F09-10 %3s %13s %13s\n' % (
                        _i, formatRESP(self.numerator_coefficient[_i], 6),
                        formatRESP(self.numerator_error[_i], 6))
            else:
                # A single repeat collapses to scalar attributes.
                string += 'B044F09-10 %3s %13s %13s\n' % (
                    0, formatRESP(self.numerator_coefficient, 6),
                    formatRESP(self.numerator_error, 6))
        if self.number_of_denominators:
            string += \
                '#\t\tDenominator coefficients:\n' + \
                '#\t\t i, coefficient, error\n'
            if self.number_of_denominators > 1:
                # Bug fix: this loop used to iterate over
                # number_of_numerators, producing wrong output (or an
                # IndexError) whenever the two counts differ.
                for _i in range(self.number_of_denominators):
                    string += 'B044F12-13 %3s %13s %13s\n' % (
                        _i, formatRESP(self.denominator_coefficient[_i], 6),
                        formatRESP(self.denominator_error[_i], 6))
            else:
                string += 'B044F12-13 %3s %13s %13s\n' % (
                    0, formatRESP(self.denominator_coefficient, 6),
                    formatRESP(self.denominator_error, 6))
        string += '#\t\t\n'
        return string
class Blockette041(Blockette):
    """
    Blockette 041: FIR Dictionary Blockette.

    The FIR blockette is used to specify FIR (Finite Impulse Response)
    digital filter coefficients. It is an alternative to blockette [44] when
    specifying FIR filters. The blockette recognizes the various forms of
    filter symmetry and can exploit them to reduce the number of factors
    specified in the blockette. See Response (Coefficients) Blockette [54]
    for more information.
    """

    id = 41
    name = "FIR Dictionary"
    fields = [
        Integer(3, "Response Lookup Key", 4),
        VariableString(4, "Response Name", 1, 25, 'UN_'),
        FixedString(5, "Symmetry Code", 1, 'U'),
        Integer(6, "Signal In Units", 3, xpath=34),
        Integer(7, "Signal Out Units", 3, xpath=34),
        Integer(8, "Number of Factors", 4),
        # REPEAT field 9 for the Number of Factors
        Loop("FIR Coefficient", "Number of Factors",
             [Float(9, "FIR Coefficient", 14, mask='%+1.7e')], flat=True),
    ]

    def parseSEED(self, data, expected_length=0):
        """
        If number of FIR coefficients are larger than maximal blockette size
        of 9999 chars a follow up blockette with the same blockette id and
        response lookup key is expected - this is checked here.
        """
        # convert to stream for test issues
        if isinstance(data, basestring):
            expected_length = len(data)
            data = StringIO(data)
        # get current lookup key: skip 3-char blockette id + 4-char length,
        # then read the 4-char response lookup key
        pos = data.tell()
        data.read(7)
        global_lookup_key = int(data.read(4))
        data.seek(pos)
        # read first blockette into a temporary buffer
        temp = StringIO()
        temp.write(data.read(expected_length))
        # check next blockettes
        while True:
            # save position so we can rewind to the non-matching blockette
            pos = data.tell()
            try:
                blockette_id = int(data.read(3))
            except ValueError:
                # end of readable data
                break
            if blockette_id != 41:
                # different blockette id -> break
                break
            blockette_length = int(data.read(4))
            lookup_key = int(data.read(4))
            if lookup_key != global_lookup_key:
                # different lookup key -> break
                break
            # ok follow up blockette found - skip some unneeded fields
            # (fields[1]..fields[5]: name, symmetry, units, factor count)
            self.fields[1].read(data)
            self.fields[2].read(data)
            self.fields[3].read(data)
            self.fields[4].read(data)
            self.fields[5].read(data)
            # remaining length in current blockette
            length = pos - data.tell() + blockette_length
            # read follow up blockette and append it to temporary blockette
            temp.write(data.read(length))
        # reposition file pointer before the first non-matching blockette
        data.seek(pos)
        # parse new combined temporary blockette
        temp.seek(0)
        Blockette.parseSEED(self, temp, expected_length=temp.len)

    def parseXML(self, xml_doc, *args, **kwargs):
        # XSEED 1.0 used a differently-cased tag; normalize before parsing.
        if self.xseed_version == '1.0':
            xml_doc.find('fir_coefficient').tag = 'FIR_coefficient'
        Blockette.parseXML(self, xml_doc, *args, **kwargs)

    def getXML(self, *args, **kwargs):
        # Restore the XSEED 1.0 tag casing on output.
        xml = Blockette.getXML(self, *args, **kwargs)
        if self.xseed_version == '1.0':
            xml.find('FIR_coefficient').tag = 'fir_coefficient'
        return xml

    def getRESP(self, station, channel, abbreviations):
        """
        Returns RESP string.
        """
        string = \
            '#\t\t+ +--------------------------------+' + \
            ' +\n' + \
            '#\t\t+ | FIR response,' + \
            '%6s ch %s | +\n' % (station, channel) + \
            '#\t\t+ +--------------------------------+' + \
            ' +\n' + \
            '#\t\t\n' + \
            'B041F05 Symmetry type: %s\n' \
            % self.symmetry_code + \
            'B041F06 Response in units lookup: %s - %s\n'\
            % (LookupCode(abbreviations, 34, 'unit_name',
                          'unit_lookup_code', self.signal_in_units),
               LookupCode(abbreviations, 34, 'unit_description',
                          'unit_lookup_code', self.signal_in_units)) + \
            'B041F07 Response out units lookup: %s - %s\n'\
            % (LookupCode(abbreviations, 34, 'unit_name',
                          'unit_lookup_code', self.signal_out_units),
               LookupCode(abbreviations, 34, 'unit_description',
                          'unit_lookup_code', self.signal_out_units)) + \
            'B041F08 Number of numerators: %s\n' \
            % self.number_of_factors
        if self.number_of_factors > 1:
            string += '#\t\tNumerator coefficients:\n' + \
                '#\t\t i, coefficient\n'
            for _i in xrange(self.number_of_factors):
                string += 'B041F09 %4s %13s\n' \
                    % (_i, formatRESP(self.FIR_coefficient[_i], 6))
        elif self.number_of_factors == 1:
            # A single repeat collapses to a scalar attribute.
            string += '#\t\tNumerator coefficients:\n' + \
                '#\t\t i, coefficient\n'
            string += 'B041F09 %4s %13s\n' \
                % (0, formatRESP(self.FIR_coefficient, 6))
        string += '#\t\t\n'
        return string
def test_readCompactDateTime(self):
    """
    Round-trips compact SEED datetime strings through a 'T'-flagged
    VariableString field in compact mode.

    Legacy leading-zero integer literals (``01``, ``00``) were normalized
    to plain decimals -- identical values, but also valid Python 3 syntax.
    """
    field = VariableString(1, "test", 0, 22, 'T', strict=True,
                           compact=True)
    # day precision round-trips unchanged
    orig = '1992,002~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1992, 1, 2))
    self.assertEqual(field.write(dt), orig)
    orig = '2007,199~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2007, 7, 18))
    self.assertEqual(field.write(dt), orig)
    # wrong syntax must raise
    orig = '1992'
    self.assertRaises(Exception, field.read, StringIO(orig))
    orig = '1992,'
    self.assertRaises(Exception, field.read, StringIO(orig))
    orig = '1992~'
    self.assertRaises(Exception, field.read, StringIO(orig))
    orig = '1992,~'
    self.assertRaises(Exception, field.read, StringIO(orig))
    # empty datetime maps to the empty string
    orig = '~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, '')
    self.assertEqual(field.write(dt), '~')
    # completely empty input behaves like an empty datetime
    orig = ''
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, '')
    self.assertEqual(field.write(dt), '~')
    # missing terminator is tolerated on read
    orig = '2007,199'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2007, 7, 18))
    self.assertEqual(field.write(dt), '2007,199~')
    # hour precision
    orig = '2009,074,12~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2009, 3, 15, 12))
    self.assertEqual(field.write(dt), orig)
    # full precision
    orig = '2008,358,01:30:22.0012~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 1200))
    self.assertEqual(field.write(dt), orig)
    # seconds precision
    orig = '2008,358,00:00:22~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0, 0, 22, 0))
    self.assertEqual(field.write(dt), orig)
    # minutes precision
    orig = '2008,358,00:30~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0, 30, 0, 0))
    self.assertEqual(field.write(dt), orig)
    # hours precision
    orig = '2008,358,01~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 0, 0, 0))
    self.assertEqual(field.write(dt), orig)
    # day precision
    orig = '2008,358~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 0, 0, 0, 0))
    self.assertEqual(field.write(dt), orig)
    # fractional seconds are padded to four digits on write
    orig = '2008,358,01:30:22.5~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 500000))
    self.assertEqual(field.write(dt), '2008,358,01:30:22.5000~')
def test_readDateTime(self):
    """
    Round-trips full-precision SEED datetime strings through a 'T'-flagged
    VariableString field.

    Legacy leading-zero integer literals (``01``) were normalized to plain
    decimals -- identical values, but also valid Python 3 syntax.
    """
    field = VariableString(1, "test", 1, 22, 'T', strict=True)
    # full precision collapses to day precision on write when time is zero
    orig = '1992,002,00:00:00.0000~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1992, 1, 2))
    self.assertEqual(field.write(dt), '1992,002~')
    orig = '1992,002~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1992, 1, 2))
    self.assertEqual(field.write(dt), '1992,002~')
    # full precision round-trips unchanged
    orig = '1992,005,01:02:03.4567~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1992, 1, 5, 1, 2, 3, 456700))
    self.assertEqual(field.write(dt), orig)
    orig = '1992,005,01:02:03.0001~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1992, 1, 5, 1, 2, 3, 100))
    self.assertEqual(field.write(dt), orig)
    orig = '1992,005,01:02:03.1000~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1992, 1, 5, 1, 2, 3, 100000))
    self.assertEqual(field.write(dt), orig)
    # short fractional seconds are padded to four digits on write
    orig = '1987,023,04:23:05.1~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1987, 1, 23, 4, 23, 5, 100000))
    self.assertEqual(field.write(dt), '1987,023,04:23:05.1000~')
    orig = '1987,023,04:23:05.123~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(1987, 1, 23, 4, 23, 5, 123000))
    self.assertEqual(field.write(dt), '1987,023,04:23:05.1230~')
    # microsecond handling of four-digit fractions
    orig = '2008,358,01:30:22.0987~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 98700))
    self.assertEqual(field.write(dt), orig)
    orig = '2008,358,01:30:22.9876~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 987600))
    self.assertEqual(field.write(dt), orig)
    orig = '2008,358,01:30:22.0005~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 500))
    self.assertEqual(field.write(dt), orig)
    orig = '2008,358,01:30:22.0000~'
    dt = field.read(StringIO(orig))
    self.assertEqual(dt, UTCDateTime(2008, 12, 23, 1, 30, 22, 0))
    self.assertEqual(field.write(dt), orig)
class Blockette043(Blockette):
    """
    Blockette 043: Response (Poles & Zeros) Dictionary Blockette.

    See Response (Poles & Zeros) Blockette [53] for more information.
    """

    id = 43
    name = "Response Poles and Zeros Dictionary"
    fields = [
        Integer(3, "Response Lookup Key", 4),
        VariableString(4, "Response Name", 1, 25, 'UN_'),
        FixedString(5, "Response type", 1, 'U'),
        Integer(6, "Stage signal input units", 3, xpath=34),
        Integer(7, "Stage signal output units", 3, xpath=34),
        Float(8, "A0 normalization factor", 12, mask='%+1.5e'),
        Float(9, "Normalization frequency", 12, mask='%+1.5e'),
        Integer(10, "Number of complex zeros", 3),
        # REPEAT fields 11 — 14 for the Number of complex zeros:
        Loop('Complex zero', "Number of complex zeros", [
            Float(11, "Real zero", 12, mask='%+1.5e'),
            Float(12, "Imaginary zero", 12, mask='%+1.5e'),
            Float(13, "Real zero error", 12, mask='%+1.5e'),
            Float(14, "Imaginary zero error", 12, mask='%+1.5e')
        ]),
        Integer(15, "Number of complex poles", 3),
        # REPEAT fields 16 — 19 for the Number of complex poles:
        Loop('Complex pole', "Number of complex poles", [
            Float(16, "Real pole", 12, mask='%+1.5e'),
            # Bug fix: the field number was 16 (duplicating "Real pole");
            # SEED defines the imaginary pole as field 17, completing the
            # 16-19 sequence.
            Float(17, "Imaginary pole", 12, mask='%+1.5e'),
            Float(18, "Real pole error", 12, mask='%+1.5e'),
            Float(19, "Imaginary pole error", 12, mask='%+1.5e')
        ])
    ]

    # Changes the name of the blockette because of an error in XSEED 1.0
    def getXML(self, *args, **kwargs):
        xml = Blockette.getXML(self, *args, **kwargs)
        if self.xseed_version == '1.0':
            xml.tag = 'response_poles_and_zeros'
        return xml

    def getRESP(self, station, channel, abbreviations):
        """
        Returns RESP string.

        :param station: station code used in the RESP banner
        :param channel: channel code used in the RESP banner
        :param abbreviations: abbreviation blockettes for unit lookups
        """
        # Field five needs some extra parsing.
        field_five_dict = {'A': 'A [Laplace Transform (Rad/sec)]',
                           'B': 'B [Analog (Hz)]',
                           'C': 'C [Composite]',
                           'D': 'D [Digital (Z-transform)]'}
        string = \
            '#\t\t+ ' + \
            '+-----------------------------------------' + \
            '---+ +\n' + \
            '#\t\t+ | Response (Poles & Zeros),' + \
            '%6s ch %s | +\n' % (station, channel) + \
            '#\t\t+ ' + \
            '+-----------------------------------------' + \
            '---+ +\n' + \
            '#\t\t\n' + \
            'B043F05 Response type: %s\n' \
            % field_five_dict[self.response_type] + \
            'B043F06 Response in units lookup: %s\n' \
            % Blockette34Lookup(abbreviations,
                                self.stage_signal_input_units) + \
            'B043F07 Response out units lookup: %s\n' \
            % Blockette34Lookup(abbreviations,
                                self.stage_signal_output_units) + \
            'B043F08 A0 normalization factor: %G\n'\
            % self.A0_normalization_factor + \
            'B043F09 Normalization frequency: %G\n'\
            % self.normalization_frequency + \
            'B043F10 Number of zeroes: %s\n'\
            % self.number_of_complex_zeros + \
            'B043F15 Number of poles: %s\n'\
            % self.number_of_complex_poles + \
            '#\t\tComplex zeroes:\n' + \
            '#\t\t i real imag real_error imag_error\n'
        if self.number_of_complex_zeros > 0:
            if self.number_of_complex_zeros != 1:
                # Loop over all zeros.
                for _i in range(self.number_of_complex_zeros):
                    string += 'B043F11-14 %4s %13s %13s %13s %13s\n' % (
                        _i,
                        formatRESP(self.real_zero[_i], 6),
                        formatRESP(self.imaginary_zero[_i], 6),
                        formatRESP(self.real_zero_error[_i], 6),
                        formatRESP(self.imaginary_zero_error[_i], 6))
            else:
                # A single repeat collapses to scalar attributes.
                string += 'B043F11-14 %4s %13s %13s %13s %13s\n' % (
                    0,
                    formatRESP(self.real_zero, 6),
                    formatRESP(self.imaginary_zero, 6),
                    formatRESP(self.real_zero_error, 6),
                    formatRESP(self.imaginary_zero_error, 6))
        string += '#\t\tComplex poles:\n' + \
            '#\t\t i real imag real_error imag_error\n'
        if self.number_of_complex_poles > 0:
            if self.number_of_complex_poles != 1:
                # Loop over all poles.
                for _i in range(self.number_of_complex_poles):
                    string += 'B043F16-19 %4s %13s %13s %13s %13s\n' % (
                        _i,
                        formatRESP(self.real_pole[_i], 6),
                        formatRESP(self.imaginary_pole[_i], 6),
                        formatRESP(self.real_pole_error[_i], 6),
                        formatRESP(self.imaginary_pole_error[_i], 6))
            else:
                string += 'B043F16-19 %4s %13s %13s %13s %13s\n' % (
                    0,
                    formatRESP(self.real_pole, 6),
                    formatRESP(self.imaginary_pole, 6),
                    formatRESP(self.real_pole_error, 6),
                    formatRESP(self.imaginary_pole_error, 6))
        string += '#\t\t\n'
        return string
class Blockette058(Blockette):
    """
    Blockette 058: Channel Sensitivity/Gain Blockette.

    When used as a gain (stage ≠ 0), this blockette is the gain for this
    stage at the given frequency. Different stages may be at different
    frequencies. However, it is strongly recommended that the same frequency
    be used in all stages of a cascade, if possible.

    When used as a sensitivity(stage=0), this blockette is the sensitivity
    (in counts per ground motion) for the entire channel at a given
    frequency, and is also referred to as the overall gain. The frequency
    here may be different from the frequencies in the gain specifications,
    but should be the same if possible. If you use cascading (more than one
    filter stage), then SEED requires a gain for each stage. A final
    sensitivity (Blockette [58], stage = 0, is required. If you do not use
    cascading (only one stage), then SEED must see a gain, a sensitivity, or
    both.

    Sample:
    0580035 3 3.27680E+03 0.00000E+00 0
    """

    id = 58
    name = "Channel Sensitivity Gain"
    fields = [
        Integer(3, "Stage sequence number", 2),
        Float(4, "Sensitivity gain", 12, mask='%+1.5e'),
        Float(5, "Frequency", 12, mask='%+1.5e'),
        Integer(6, "Number of history values", 2),
        # REPEAT fields 7 — 9 for the Number of history values:
        Loop('History', "Number of history values", [
            Float(7, "Sensitivity for calibration", 12, mask='%+1.5e'),
            Float(8, "Frequency of calibration sensitivity", 12,
                  mask='%+1.5e'),
            VariableString(9, "Time of above calibration", 1, 22, 'T')
        ])
    ]

    def getRESP(self, station, channel, abbreviations):
        """
        Returns RESP string.

        :param station: station code used in the RESP banner
        :param channel: channel code used in the RESP banner
        :param abbreviations: abbreviation blockettes (unused here, kept for
            a uniform getRESP signature across blockettes)
        """
        # This blockette can result in two different RESPs: "Channel Gain"
        # for a filter stage, "Channel Sensitivity" for stage 0 (overall).
        blkt_type = self.stage_sequence_number
        if blkt_type != 0:
            string = \
                '#\t\t+ +-------------------------------' + \
                '--------+ +\n' + \
                '#\t\t+ | Channel Gain,' + \
                '%6s ch %s | +\n' % (station, channel) +\
                '#\t\t+ +-------------------------------' + \
                '--------+ +\n'
        else:
            string = \
                '#\t\t+ +--------------------------------' + \
                '-------+ +\n' + \
                '#\t\t+ | Channel Sensitivity,' + \
                '%6s ch %s | +\n' % (station, channel) + \
                '#\t\t+ +--------------------------------' + \
                '-------+ +\n'
        string += '#\t\t\n' + \
            'B058F03 Stage sequence number: %s\n' \
            % blkt_type
        if blkt_type != 0:
            string += \
                'B058F04 Gain: %s\n' \
                % formatRESP(self.sensitivity_gain, 6) + \
                'B058F05 Frequency of gain: %s HZ\n' \
                % formatRESP(self.frequency, 6)
        else:
            string += \
                'B058F04 Sensitivity: %s\n' \
                % formatRESP(self.sensitivity_gain, 6) + \
                'B058F05 Frequency of sensitivity: %s HZ\n' \
                % formatRESP(self.frequency, 6)
        string += \
            'B058F06 Number of calibrations: %s\n' \
            % self.number_of_history_values
        if self.number_of_history_values > 1:
            string += \
                '#\t\tCalibrations:\n' + \
                '#\t\t i, sensitivity, frequency, time of calibration\n'
            for _i in xrange(self.number_of_history_values):
                # Bug fix: the running index _i was missing from the
                # argument tuple, so the four conversion specifiers only
                # received three values and raised a TypeError at runtime.
                string += \
                    'B058F07-08 %2s %13s %13s %s\n' \
                    % (_i,
                       formatRESP(self.sensitivity_for_calibration[_i], 6),
                       formatRESP(
                           self.frequency_of_calibration_sensitivity[_i],
                           6),
                       self.time_of_above_calibration[_i].formatSEED())
        elif self.number_of_history_values == 1:
            # A single repeat collapses to scalar attributes.
            string += \
                '#\t\tCalibrations:\n' + \
                '#\t\t i, sensitivity, frequency, time of calibration\n' + \
                'B058F07-08 0 %13s %13s %s\n' \
                % (formatRESP(self.sensitivity_for_calibration, 6),
                   formatRESP(self.frequency_of_calibration_sensitivity, 6),
                   self.time_of_above_calibration.formatSEED())
        string += '#\t\t\n'
        return string
def test_readDateTime(self):
    """
    Round-trip tests for strict SEED variable-length date/time fields.

    Each case is (input, expected UTCDateTime, expected re-serialization);
    a ``None`` re-serialization means the input round-trips unchanged.
    Note: midnight timestamps are written back date-only, and fractional
    seconds are always padded to four digits.

    Fixes: ``01``/``00`` leading-zero literals (Python-2-only octal
    syntax; decimal was intended) and the deprecated ``assertEquals``
    alias.
    """
    field = VariableString(1, "test", 1, 22, 'T', strict=True)
    cases = [
        ('1992,002,00:00:00.0000~', UTCDateTime(1992, 1, 2), '1992,002~'),
        ('1992,002~', UTCDateTime(1992, 1, 2), None),
        ('1992,005,01:02:03.4567~',
         UTCDateTime(1992, 1, 5, 1, 2, 3, 456700), None),
        ('1992,005,01:02:03.0001~',
         UTCDateTime(1992, 1, 5, 1, 2, 3, 100), None),
        ('1992,005,01:02:03.1000~',
         UTCDateTime(1992, 1, 5, 1, 2, 3, 100000), None),
        ('1987,023,04:23:05.1~',
         UTCDateTime(1987, 1, 23, 4, 23, 5, 100000),
         '1987,023,04:23:05.1000~'),
        ('1987,023,04:23:05.123~',
         UTCDateTime(1987, 1, 23, 4, 23, 5, 123000),
         '1987,023,04:23:05.1230~'),
        ('2008,358,01:30:22.0987~',
         UTCDateTime(2008, 12, 23, 1, 30, 22, 98700), None),
        ('2008,358,01:30:22.9876~',
         UTCDateTime(2008, 12, 23, 1, 30, 22, 987600), None),
        ('2008,358,01:30:22.0005~',
         UTCDateTime(2008, 12, 23, 1, 30, 22, 500), None),
        ('2008,358,01:30:22.0000~',
         UTCDateTime(2008, 12, 23, 1, 30, 22, 0), None),
    ]
    for orig, expected, written in cases:
        dt = field.read(StringIO(orig))
        self.assertEqual(dt, expected)
        self.assertEqual(field.write(dt),
                         orig if written is None else written)
class Blockette048(Blockette):
    """
    Blockette 048: Channel Sensitivity/Gain Dictionary Blockette.

    See Channel Sensitivity/Gain Blockette [58] for more information.
    """

    id = 48
    # NOTE(review): "Sensivitity" is misspelled, but this string likely
    # feeds the generated (X)SEED tag names, so it is preserved for
    # backward compatibility -- confirm before fixing.
    name = "Channel Sensivitity Gain Dictionary"
    fields = [
        Integer(3, "Response Lookup Key", 4),
        VariableString(4, "Response Name", 1, 25, 'UN_'),
        Float(5, "Sensitivity gain", 12, mask='%+1.5e'),
        Float(6, "Frequency", 12, mask='%+1.5e'),
        Integer(7, "Number of history values", 2),
        # REPEAT fields 8 - 10 for the Number of history values:
        Loop('History', "Number of history values", [
            Float(8, "Sensitivity for calibration", 12, mask='%+1.5e'),
            Float(9, "Frequency of calibration sensitivity", 12,
                  mask='%+1.5e'),
            VariableString(10, "Time of above calibration", 1, 22, 'T')
        ])
    ]

    def getRESP(self, station, channel, abbreviations):
        """
        Return the RESP string for this blockette.

        :param station: station code, only used in the header comment.
        :param channel: channel code, only used in the header comment.
        :param abbreviations: unused here; kept so all blockettes share the
            same ``getRESP`` signature.
        """
        string = \
            '#\t\t+ ' + \
            '+---------------------------------------+' + \
            ' +\n' + \
            '#\t\t+ | Channel Sensitivity,' + \
            '%6s ch %s | +\n' % (station, channel) + \
            '#\t\t+ ' + \
            '+---------------------------------------+' + \
            ' +\n' + \
            '#\t\t\n' + \
            'B048F05 Sensitivity: %s\n' \
            % formatRESP(self.sensitivity_gain, 6) + \
            'B048F06 Frequency of sensitivity: %s\n' \
            % formatRESP(self.frequency, 6) + \
            'B048F07 Number of calibrations: %s\n' \
            % self.number_of_history_values
        if self.number_of_history_values > 1:
            # With more than one history entry the Loop fields are lists.
            string += \
                '#\t\tCalibrations:\n' + \
                '#\t\t i, sensitivity, frequency, time of calibration\n'
            for _i in range(self.number_of_history_values):
                # BUG FIX: the format string has four conversions
                # (%2s %13s %13s %s) but the argument tuple was missing the
                # leading index, raising "not enough arguments for format
                # string" whenever more than one calibration was present.
                string += \
                    'B048F08-09 %2s %13s %13s %s\n' \
                    % (_i,
                       formatRESP(self.sensitivity_for_calibration[_i], 6),
                       formatRESP(
                           self.frequency_of_calibration_sensitivity[_i], 6),
                       self.time_of_above_calibration[_i].formatSEED())
        elif self.number_of_history_values == 1:
            # A single history entry is stored as scalars, not lists, so it
            # needs its own branch with a hard-coded index of 0.
            string += \
                '#\t\tCalibrations:\n' + \
                '#\t\t i, sensitivity, frequency, time of calibration\n' + \
                'B048F08-09 0 %13s %13s %s\n' % (
                    formatRESP(self.sensitivity_for_calibration, 6),
                    formatRESP(self.frequency_of_calibration_sensitivity, 6),
                    self.time_of_above_calibration.formatSEED())
        string += '#\t\t\n'
        return string
def test_readCompactDateTime(self):
    """
    Round-trip tests for compact SEED variable-length date/time fields.

    Each case is (input, expected value, expected re-serialization); a
    ``None`` re-serialization means the input round-trips unchanged. An
    empty or bare-terminator input reads as ``''`` and writes as ``'~'``;
    truncated-but-valid inputs (missing ``~``, partial time) are accepted,
    while malformed ones must raise.

    Fixes: ``01``/``00`` leading-zero literals (Python-2-only octal
    syntax; decimal was intended) and the deprecated ``assertEquals``
    alias.
    """
    field = VariableString(1, "test", 0, 22, 'T', strict=True,
                           compact=True)
    cases = [
        ('1992,002~', UTCDateTime(1992, 1, 2), None),
        ('2007,199~', UTCDateTime(2007, 7, 18), None),
        # empty datetime
        ('~', '', None),
        ('', '', '~'),
        # missing terminator is tolerated
        ('2007,199', UTCDateTime(2007, 7, 18), '2007,199~'),
        ('2009,074,12~', UTCDateTime(2009, 3, 15, 12), None),
        ('2008,358,01:30:22.0012~',
         UTCDateTime(2008, 12, 23, 1, 30, 22, 1200), None),
        ('2008,358,00:00:22~',
         UTCDateTime(2008, 12, 23, 0, 0, 22, 0), None),
        ('2008,358,00:30~',
         UTCDateTime(2008, 12, 23, 0, 30, 0, 0), None),
        ('2008,358,01~',
         UTCDateTime(2008, 12, 23, 1, 0, 0, 0), None),
        ('2008,358~',
         UTCDateTime(2008, 12, 23, 0, 0, 0, 0), None),
        ('2008,358,01:30:22.5~',
         UTCDateTime(2008, 12, 23, 1, 30, 22, 500000),
         '2008,358,01:30:22.5000~'),
    ]
    for orig, expected, written in cases:
        dt = field.read(StringIO(orig))
        self.assertEqual(dt, expected)
        self.assertEqual(field.write(dt),
                         orig if written is None else written)
    # malformed inputs must raise
    for bad in ('1992', '1992,', '1992~', '1992,~'):
        self.assertRaises(Exception, field.read, StringIO(bad))