def test_sacpaz_from_dataless(self):
    # The following dictionary is extracted from a datalessSEED
    # file
    pazdict = {'sensitivity': 2516580000.0,
               'digitizer_gain': 1677720.0,
               'seismometer_gain': 1500.0,
               'zeros': [0j, 0j],
               'gain': 59198800.0,
               'poles': [(-0.037010000000000001 + 0.037010000000000001j),
                         (-0.037010000000000001 - 0.037010000000000001j),
                         (-131 + 467.30000000000001j),
                         (-131 - 467.30000000000001j),
                         (-251.30000000000001 + 0j)]}
    tr = Trace()
    # This file was extracted from the datalessSEED file using rdseed
    pazfile = os.path.join(os.path.dirname(__file__),
                           'data', 'SAC_PZs_NZ_HHZ_10')
    attach_paz(tr, pazfile, todisp=False)
    sacconstant = pazdict['digitizer_gain'] * \
        pazdict['seismometer_gain'] * pazdict['gain']
    np.testing.assert_almost_equal(tr.stats.paz['gain'] / 1e17,
                                   sacconstant / 1e17, decimal=6)
    # pole-zero files according to the SAC convention are in displacement
    self.assertEqual(len(tr.stats.paz['zeros']), 3)

def test_sac_instrument_correction(self):
    # SAC recommends to taper the transfer function if a pure
    # deconvolution is done instead of simulating a different
    # instrument. This test checks the difference between the
    # result from removing the instrument response using SAC or
    # ObsPy. Visual inspection shows that the traces are pretty
    # much identical but differences remain (rms ~ 0.042). Haven't
    # found the cause for those, yet. One possible reason is the
    # floating point arithmetic of SAC vs. the double precision
    # arithmetic of Python. However differences still seem to be
    # too big for that.
    pzf = os.path.join(self.path, 'SAC_PZs_KARC_BHZ')
    sacf = os.path.join(self.path, 'KARC.LHZ.SAC.asc.gz')
    testsacf = os.path.join(self.path, 'KARC_corrected.sac.asc.gz')
    plow = 160.
    phigh = 4.
    fl1 = 1.0 / (plow + 0.0625 * plow)
    fl2 = 1.0 / plow
    fl3 = 1.0 / phigh
    fl4 = 1.0 / (phigh - 0.25 * phigh)
    # Uncomment the following to run the sac-commands
    # that created the testing file
    # if 1:
    #     import subprocess as sp
    #     p = sp.Popen('sac', shell=True, stdin=sp.PIPE)
    #     cd1 = p.stdin
    #     print("r %s" % sacf, file=cd1)
    #     print("rmean", file=cd1)
    #     print("rtrend", file=cd1)
    #     print("taper type cosine width 0.03", file=cd1)
    #     print("transfer from polezero subtype %s to none \
    # freqlimits %f %f %f %f" % (pzf, fl1, fl2, fl3, fl4), file=cd1)
    #     print("w over ./data/KARC_corrected.sac", file=cd1)
    #     print("quit", file=cd1)
    #     cd1.close()
    #     p.wait()
    stats = {'network': 'KA', 'delta': 0.99999988079072466,
             'station': 'KARC', 'location': 'S1',
             'starttime': UTCDateTime(2001, 2, 13, 0, 0, 0, 993700),
             'calib': 1.00868e+09, 'channel': 'BHZ'}
    with gzip.open(sacf) as f:
        tr = Trace(np.loadtxt(f), stats)
    attach_paz(tr, pzf, tovel=False)
    tr.data = simulate_seismometer(
        tr.data, tr.stats.sampling_rate, paz_remove=tr.stats.paz,
        remove_sensitivity=False, pre_filt=(fl1, fl2, fl3, fl4))
    with gzip.open(testsacf) as f:
        data = np.loadtxt(f)
    # import matplotlib.pyplot as plt
    # plt.plot(tr.data)
    # plt.plot(data)
    # plt.show()
    rms = np.sqrt(np.sum((tr.data - data) ** 2) /
                  np.sum(tr.data ** 2))
    self.assertTrue(rms < 0.0421)

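# For orientation only: the four freqlimits corners computed above, with
# their numeric values written out (same variable names as in the test,
# plain arithmetic, nothing new assumed).
plow, phigh = 160.0, 4.0
fl1 = 1.0 / (plow + 0.0625 * plow)   # = 1/170  ~ 0.00588 Hz
fl2 = 1.0 / plow                     # = 1/160  = 0.00625 Hz
fl3 = 1.0 / phigh                    # = 1/4    = 0.25 Hz
fl4 = 1.0 / (phigh - 0.25 * phigh)   # = 1/3    ~ 0.333 Hz
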
def test_attach_paz_diff_order(self):
    pazfile = os.path.join(os.path.dirname(__file__),
                           'data', 'NZCRLZ_HHZ10.pz')
    tr = Trace()
    attach_paz(tr, pazfile)
    np.testing.assert_array_almost_equal(tr.stats.paz['gain'],
                                         7.4592e-2, decimal=6)
    self.assertEqual(len(tr.stats.paz['zeros']), 5)
    self.assertEqual(len(tr.stats.paz['poles']), 4)

def _read_paz(path):
    """
    Read a directory with paz files or a single file.

    Limitations:
    (1) the directory must contain *only* paz files
    (2) a paz file can optionally have a ".pz" or ".paz" suffix
    (3) the paz file name (without prefix and suffix) *has* to contain
        the trace_id (NET.STA.LOC.CHAN) of the corresponding trace
        in the last part of its name
        (e.g., 20110208_1600.NOW.IV.CRAC.00.EHZ.paz)
    """
    if path is None:
        return None
    logger.info('Reading PAZ...')
    paz = dict()
    if os.path.isdir(path):
        listing = os.listdir(path)
        # check if files have a common prefix: we will strip it later
        prefix = os.path.commonprefix(listing)
        for filename in listing:
            fullpath = os.path.join(path, filename)
            try:
                # This is a horrible hack!
                # Since attach_paz needs a trace,
                # we create a trace and then, later,
                # we just retrieve the paz object
                # from the trace ;)
                tr = Trace()
                attach_paz(tr, fullpath)
                bname = os.path.basename(filename)
                # strip .pz suffix, if there
                bname = re.sub('.pz$', '', bname)
                # strip .paz suffix, if there
                bname = re.sub('.paz$', '', bname)
                # and strip any common prefix
                bname = re.sub('^' + prefix, '', bname)
                # we assume that the last four fields of bname
                # (separated by '.') are the trace_id
                trace_id = '.'.join(bname.split('.')[-4:])
                paz[trace_id] = tr.stats.paz.copy()
            except IOError:
                continue
    elif os.path.isfile(path):
        # If a filename is provided, store it as 'default' paz.
        filename = path
        tr = Trace()
        attach_paz(tr, filename)
        paz['default'] = tr.stats.paz.copy()
    logger.info('Reading PAZ: done')
    return paz

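# Minimal usage sketch for _read_paz(); the paths below are hypothetical.
# Single file: the PAZ is stored under the 'default' key.
paz = _read_paz('/path/to/SAC_PZs_NZ_CRLZ_HHZ')
print(paz['default'])

# Directory of PAZ files: each entry is keyed by the trailing
# NET.STA.LOC.CHAN part of the file name, e.g. 'IV.CRAC.00.EHZ'.
paz = _read_paz('/path/to/sacpz_dir')
print(sorted(paz.keys()))
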
def removeInstrument(st, args):
    if args.sim == 'PZs':
        # prefilter corner frequencies
        f = args.flim.split()
        f0 = float(f[0])
        f1 = float(f[1])
        f2 = float(f[2])
        f3 = float(f[3])

        toPurge = []  # stations to purge if no PAZ is found
        for i in range(len(st)):
            # attach poles and zeros of the instrument
            if args.dva == '1':
                try:
                    attach_paz(st[i], st[i].stats.PZs_file, todisp=False)
                except Exception:
                    print("No appropriate PZs file found for station",
                          st[i].stats.station, st[i].stats.channel,
                          st[i].stats.network)
                    toPurge.append(st[i].stats.station)
            else:
                try:
                    attach_paz(st[i], st[i].stats.PZs_file, tovel=True)
                except Exception:
                    print("No appropriate PZs file found for station",
                          st[i].stats.station, st[i].stats.channel,
                          st[i].stats.network)
                    toPurge.append(st[i].stats.station)

        # remove stations if toPurge is not empty
        if len(toPurge) > 0:
            st = purgeListStation(st, toPurge, 'r')
            print("Check if the station/channel/network/location of the "
                  "PZs files and the same strings within the loaded "
                  "binary files correspond.")
            print("It may occur, for instance, that the header strings of "
                  "the waveform files (e.g. sac, fseed) do not agree with "
                  "the same strings in the PZs file names, e.g. the name "
                  "of the network.")
            print("If these strings do not correspond, modify the names "
                  "of the PZs files or the header values of the waveforms.")
            print("You may also choose to remove this station using the "
                  "option --purge (see help for details).")

        # now remove the instrument response
        for i in range(len(st)):
            # remove instrument to displacement
            # st[i].data = detrend(st[i].data)
            st[i].data = simulate_seismometer(
                st[i].data, st[i].stats.sampling_rate,
                paz_remove=st[i].stats.paz, taper=True,
                taper_fraction=0.050,
                pre_filt=(f0, f1, f2, f3))  # water_level=60.0
            # from meters to centimeters
            st[i].data = st[i].data * 100

    return st

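# Hypothetical invocation of removeInstrument(): 'sim', 'flim' and 'dva'
# mirror the attributes the function reads from args; the values and paths
# below are illustrative only, not part of any real configuration.
from argparse import Namespace
from obspy import read

args = Namespace(sim='PZs', flim='0.00588 0.00625 0.25 0.333', dva='1')
st = read('/path/to/waveforms/*.sac')            # hypothetical path
for tr in st:
    # the function expects each trace to carry the path of its PZs file
    tr.stats.PZs_file = '/path/to/SAC_PZs_%s_%s' % (tr.stats.station,
                                                    tr.stats.channel)
st = removeInstrument(st, args)
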
def test_sacpaz_from_resp(self):
    # The following two files were both extracted from a dataless
    # seed file using rdseed
    respfile = os.path.join(os.path.dirname(__file__),
                            'data', 'RESP.NZ.CRLZ.10.HHZ')
    sacpzfile = os.path.join(os.path.dirname(__file__),
                             'data', 'SAC_PZs_NZ_CRLZ_HHZ')
    # This is a rather lengthy test, in which the poles, zeros and the
    # gain of each instrument response file are converted into the
    # corresponding velocity frequency response functions, which have
    # to be sufficiently close. Possibly due to different truncations
    # in the RESP-formatted and SAC-formatted response files the
    # frequency response functions are not identical.
    tr1 = Trace()
    tr2 = Trace()
    attach_resp(tr1, respfile, torad=True, todisp=False)
    attach_paz(tr2, sacpzfile, torad=False, tovel=True)
    p1 = tr1.stats.paz.poles
    z1 = tr1.stats.paz.zeros
    g1 = tr1.stats.paz.gain
    t_samp = 0.01
    n = 32768
    fy = 1 / (t_samp * 2.0)
    # start at zero to get zero for offset / DC of fft
    f = np.arange(0, fy + fy / n, fy / n)  # arange should include fy
    w = f * 2 * np.pi
    s = 1j * w
    a1 = np.poly(p1)
    b1 = g1 * np.poly(z1)
    h1 = np.polyval(b1, s) / np.polyval(a1, s)
    h1 = np.conj(h1)
    h1[-1] = h1[-1].real + 0.0j
    p2 = tr2.stats.paz.poles
    z2 = tr2.stats.paz.zeros
    g2 = tr2.stats.paz.gain
    a2 = np.poly(p2)
    b2 = g2 * np.poly(z2)
    h2 = np.polyval(b2, s) / np.polyval(a2, s)
    h2 = np.conj(h2)
    h2[-1] = h2[-1].real + 0.0j
    amp1 = abs(h1)
    amp2 = abs(h2)
    phase1 = np.unwrap(np.arctan2(-h1.imag, h1.real))
    phase2 = np.unwrap(np.arctan2(-h2.imag, h2.real))
    np.testing.assert_almost_equal(phase1, phase2, decimal=4)
    rms = np.sqrt(np.sum((amp1 - amp2) ** 2) /
                  np.sum(amp2 ** 2))
    self.assertTrue(rms < 2.02e-06)
    self.assertAlmostEqual(tr1.stats.paz.t_shift, 0.4022344, places=6)

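# The poles/zeros-to-frequency-response conversion exercised above can be
# factored into a small standalone helper. This is only a sketch that
# mirrors the numpy calls of the test (np.poly / np.polyval evaluated on
# the jw axis); the helper name is not part of any library API.
import numpy as np


def paz_to_freq_resp(poles, zeros, gain, t_samp=0.01, n=32768):
    """Evaluate H(s) = gain * prod(s - z) / prod(s - p) on the same
    frequency grid the test uses (sketch, same sign conventions)."""
    fy = 1.0 / (t_samp * 2.0)              # Nyquist frequency
    f = np.arange(0, fy + fy / n, fy / n)  # includes fy
    s = 1j * 2 * np.pi * f                 # Laplace variable on the jw axis
    h = np.polyval(gain * np.poly(zeros), s) / np.polyval(np.poly(poles), s)
    h = np.conj(h)                         # match the test's convention
    amp = np.abs(h)
    phase = np.unwrap(np.arctan2(-h.imag, h.real))
    return f, amp, phase
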
def test_attach_paz(self):
    fvelhz = io.StringIO("""ZEROS 3
    -5.032 0.0
    POLES 6
    -0.02365 0.02365
    -0.02365 -0.02365
    -39.3011 0.
    -7.74904 0.
    -53.5979 21.7494
    -53.5979 -21.7494
    CONSTANT 2.16e18""")
    tr = Trace()
    attach_paz(tr, fvelhz, torad=True, todisp=True)
    np.testing.assert_array_almost_equal(tr.stats.paz['zeros'][0],
                                         -31.616988, decimal=6)
    self.assertEqual(len(tr.stats.paz['zeros']), 4)

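# Worked numbers for the assertions above (a sketch, nothing beyond what
# the test already implies): torad=True converts the Hz-valued poles and
# zeros to angular frequency, i.e. multiplies them by 2*pi.
import math
print(-5.032 * 2 * math.pi)   # ~ -31.616988, the first zero checked above
# todisp=True appends one zero at the origin to go from velocity to
# displacement, which is presumably why the 3 declared zeros end up as 4.
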
def decon(stf, PZ=None, lowf=0.005, highf=0.008):
    nqf = stf[0].stats.sampling_rate / 2
    pre_filt = [lowf, highf, nqf - 2, nqf]
    if stf[0].stats.station == 'HNR':
        stf.remove_response(pre_filt=pre_filt, output='disp')
    else:
        for i in range(3):
            attach_paz(stf[i], PZ[0])
        paz = dict(stf[0].stats.paz)
        stf.simulate(paz_remove=paz, pre_filt=pre_filt)
    stf.taper(0.05, type='hann')
    return stf

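# Hypothetical call of decon(): a three-component stream plus a list whose
# first element is a SAC pole-zero file shared by all three traces. The
# paths and station names below are illustrative only.
from obspy import read

stf = read('/path/to/STA.BH?.sac')                 # hypothetical path
stf = decon(stf, PZ=['/path/to/SAC_PZs_STA_BHZ'],  # hypothetical path
            lowf=0.005, highf=0.008)
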
def _add_paz_and_coords(trace, dataless, paz_dict=None):
    trace.stats.paz = None
    trace.stats.coords = None
    traceid = trace.get_id()
    time = trace.stats.starttime
    # We first look into the dataless dictionary, if available
    if isinstance(dataless, dict):
        for sp in dataless.values():
            # Check first if our traceid is in the dataless file
            if traceid not in str(sp):
                continue
            try:
                paz = AttribDict(sp.get_paz(traceid, time))
                coords = AttribDict(sp.get_coordinates(traceid, time))
            except SEEDParserException as err:
                logger.error('%s time: %s' % (err, str(time)))
                pass
    elif isinstance(dataless, Inventory):
        try:
            with warnings.catch_warnings(record=True) as warns:
                # get_sacpz() can issue warnings on more than one PAZ
                # found, so let's catch those warnings and log them
                # properly
                sacpz = dataless.get_response(traceid, time).get_sacpz()
                for w in warns:
                    message = str(w.message)
                    logger.warning('%s: %s' % (traceid, message))
            attach_paz(trace, io.StringIO(sacpz))
            paz = trace.stats.paz
            coords = AttribDict(dataless.get_coordinates(traceid, time))
        except Exception as err:
            logger.error('%s traceid: %s time: %s' %
                         (err, traceid, str(time)))
            pass
    try:
        trace.stats.paz = paz
        # elevation is in meters in the dataless
        coords.elevation /= 1000.
        trace.stats.coords = coords
    except Exception:
        pass
    # If we couldn't find any PAZ in the dataless dictionary,
    # we try to attach paz from the paz dictionary passed as argument
    if trace.stats.paz is None and paz_dict is not None:
        # Look for traceid or for a generic paz
        net, sta, loc, chan = trace.id.split('.')
        ids = [trace.id,
               '.'.join(('__', '__', '__', '__')),
               '.'.join((net, '__', '__', '__')),
               '.'.join((net, sta, '__', '__')),
               '.'.join((net, sta, loc, '__')),
               'default']
        for id in ids:
            try:
                paz = paz_dict[id]
                trace.stats.paz = paz
            except KeyError:
                pass
    # If we're still out of luck, we try to build the sensitivity
    # from the user2 and user3 header fields (ISNet format)
    if trace.stats.paz is None and trace.stats.format == 'ISNet':
        try:
            # instrument constants
            u2 = trace.stats.sac.user2
            u3 = trace.stats.sac.user3
            paz = AttribDict()
            paz.sensitivity = u3 / u2
            paz.poles = []
            paz.zeros = []
            paz.gain = 1
            trace.stats.paz = paz
        except AttributeError:
            pass
    # Still no paz? Antilles or IPOC format!
    if (trace.stats.paz is None and
            (trace.stats.format == 'Antilles' or
             trace.stats.format == 'IPOC')):
        paz = AttribDict()
        paz.sensitivity = 1
        paz.poles = []
        paz.zeros = []
        paz.gain = 1
        trace.stats.paz = paz
    # If we still don't have trace coordinates,
    # we try to get them from the SAC header
    if trace.stats.coords is None:
        try:
            stla = trace.stats.sac.stla
            stlo = trace.stats.sac.stlo
            try:
                stel = trace.stats.sac.stel
                # elevation is in meters in SAC header:
                stel /= 1000.
            except AttributeError:
                stel = 0.
            coords = AttribDict()
            coords.elevation = stel
            coords.latitude = stla
            coords.longitude = stlo
            trace.stats.coords = coords
        except AttributeError:
            pass
    # Still no coords? Raise an exception
    if trace.stats.coords is None:
        raise Exception(
            '%s: could not find coords for trace: skipping trace' %
            traceid)

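# The paz_dict lookup above accepts the exact trace id, progressively more
# generic keys with '__' as a per-field wildcard, and finally 'default'.
# A hypothetical dictionary illustrating the accepted keys (the placeholder
# PAZ is only there to keep the example self-contained):
from obspy.core.util import AttribDict

dummy_paz = AttribDict({'poles': [], 'zeros': [], 'gain': 1,
                        'sensitivity': 1})
paz_dict = {
    'IV.CRAC.00.EHZ': dummy_paz,   # exact trace id
    'IV.CRAC.00.__': dummy_paz,    # any channel of IV.CRAC.00
    'IV.CRAC.__.__': dummy_paz,    # any location/channel of IV.CRAC
    'IV.__.__.__': dummy_paz,      # any station of network IV
    '__.__.__.__': dummy_paz,      # fully generic
    'default': dummy_paz,          # e.g. a single-file _read_paz() result
}
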
def _add_paz_and_coords(trace, metadata, paz_dict, config):
    traceid = trace.get_id()
    # If we already know that traceid is skipped, raise a silent exception
    if traceid in _add_paz_and_coords.skipped:
        raise Exception()
    trace.stats.paz = None
    trace.stats.coords = None
    time = trace.stats.starttime
    # We first check whether metadata is a dataless dictionary
    if isinstance(metadata, dict):
        for sp in metadata.values():
            # Check first if our traceid is in the dataless file
            if traceid not in str(sp):
                continue
            try:
                paz = AttribDict(sp.get_paz(traceid, time))
                coords = AttribDict(sp.get_coordinates(traceid, time))
            except SEEDParserException as err:
                logger.error('%s time: %s' % (err, str(time)))
                pass
    elif isinstance(metadata, Inventory):
        try:
            with warnings.catch_warnings(record=True) as warns:
                # get_sacpz() can issue warnings on more than one PAZ
                # found, so let's catch those warnings and log them
                # properly
                sacpz = metadata.get_response(traceid, time).get_sacpz()
                for w in warns:
                    message = str(w.message)
                    logger.warning('%s: %s' % (traceid, message))
            attach_paz(trace, io.StringIO(sacpz))
            paz = trace.stats.paz
            coords = AttribDict(metadata.get_coordinates(traceid, time))
        except Exception as err:
            logger.error('%s traceid: %s time: %s' %
                         (err, traceid, str(time)))
            pass
    try:
        trace.stats.paz = paz
        # elevation is in meters
        coords.elevation /= 1000.
        trace.stats.coords = coords
    except Exception:
        pass
    # If we couldn't find any PAZ in the dataless dictionary
    # or in the Inventory, we try to attach paz from a paz dictionary
    if trace.stats.paz is None and paz_dict is not None:
        # Look for traceid or for a generic paz
        net, sta, loc, chan = trace.id.split('.')
        ids = [trace.id,
               '.'.join(('__', '__', '__', '__')),
               '.'.join((net, '__', '__', '__')),
               '.'.join((net, sta, '__', '__')),
               '.'.join((net, sta, loc, '__')),
               'default']
        for id in ids:
            try:
                paz = paz_dict[id]
                trace.stats.paz = paz
            except KeyError:
                pass
    # If a "sensitivity" config option is provided, override the paz
    # computed from metadata or paz_dict
    if config.sensitivity is not None:
        # instrument constants
        paz = AttribDict()
        paz.sensitivity = _compute_sensitivity(trace, config)
        paz.poles = []
        paz.zeros = []
        paz.gain = 1
        trace.stats.paz = paz
    # If we still don't have trace coordinates,
    # we try to get them from the SAC header
    if trace.stats.coords is None:
        try:
            stla = trace.stats.sac.stla
            stlo = trace.stats.sac.stlo
            try:
                stel = trace.stats.sac.stel
                # elevation is in meters in SAC header:
                stel /= 1000.
            except AttributeError:
                stel = 0.
            coords = AttribDict()
            coords.elevation = stel
            coords.latitude = stla
            coords.longitude = stlo
            trace.stats.coords = coords
        except AttributeError:
            pass
    # Still no coords? Raise an exception
    if trace.stats.coords is None:
        _add_paz_and_coords.skipped.append(traceid)
        raise Exception(
            '%s: could not find coords for trace: skipping trace' %
            traceid)
    if trace.stats.coords.latitude == trace.stats.coords.longitude == 0:
        logger.warning(
            '{}: trace has latitude and longitude equal to zero!'.format(
                traceid))

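# The function above memoizes already-skipped trace ids in a function
# attribute, so that attribute has to exist before the first call. A
# minimal sketch, assuming the module initializes it right after the
# definition:
_add_paz_and_coords.skipped = list()
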