def test_read_wavename(self):
    """Check that exactly one wavefile name is read from the test s-file."""
    import os
    from eqcorrscan.utils.sfile_util import readwavename
    base_dir = os.path.abspath(os.path.dirname(__file__))
    sfile_path = os.path.join(base_dir, 'test_data', 'REA', 'TEST_',
                              '19-0926-59L.S201309')
    wave_names = readwavename(sfile_path)
    self.assertEqual(len(wave_names), 1)
def setUpClass(cls):
    """Load the shared test event, wavefile names and stream once per class."""
    cls.testing_path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'test_data')
    event_sfile = os.path.join(cls.testing_path, 'REA', 'TEST_',
                               '01-0411-15L.S201309')
    cls.event = read_event(event_sfile)
    cls.wavfiles = readwavename(event_sfile)
    # Waveform data live in the WAV tree parallel to the REA tree
    cls.datapath = os.path.join(cls.testing_path, 'WAV', 'TEST_')
    cls.st = read(os.path.join(cls.datapath, cls.wavfiles[0]))
    cls.respdir = cls.testing_path
def test_read_write(self):
    """Test the round-trip read and write capabilities of sfile_util.

    Writes the basic test event out as QuakeML and as a Nordic s-file,
    reads each back in and checks that the attributes survive, then
    checks the PICK/EVENTINFO wrapper conversions there and back again.
    """
    import os
    from obspy.core.event import Catalog
    import obspy
    # readEvents was renamed read_events at obspy 1.0
    if int(obspy.__version__.split('.')[0]) >= 1:
        from obspy.core.event import read_events
    else:
        from obspy.core.event import readEvents as read_events

    def assert_matches_test_cat(got_cat):
        """Compare a catalog converted via s-file/PICK against test_cat."""
        for key in ('time', 'backazimuth', 'onset', 'phase_hint',
                    'polarity'):
            self.assertEqual(got_cat[0].picks[0][key],
                             test_cat[0].picks[0][key])
        self.assertEqual(got_cat[0].picks[0].waveform_id.station_code,
                         test_cat[0].picks[0].waveform_id.station_code)
        # Only the last character of the channel code is guaranteed
        self.assertEqual(got_cat[0].picks[0].waveform_id.channel_code[-1],
                         test_cat[0].picks[0].waveform_id.channel_code[-1])
        # origin resource_id is not preserved by the conversion.
        # Note that time_residual_RMS is not a quakeML format
        for key in ('time', 'longitude', 'latitude', 'depth'):
            self.assertEqual(got_cat[0].origins[0][key],
                             test_cat[0].origins[0][key])
        for i in range(3):
            for key in ('mag', 'creation_info', 'magnitude_type'):
                self.assertEqual(got_cat[0].magnitudes[i][key],
                                 test_cat[0].magnitudes[i][key])
        self.assertEqual(got_cat[0].event_descriptions,
                         test_cat[0].event_descriptions)
        # amplitude resource_id / pick_id / waveform_id are not preserved
        for key in ('period', 'snr'):
            self.assertEqual(got_cat[0].amplitudes[0][key],
                             test_cat[0].amplitudes[0][key])

    # Set-up a test event in a catalogue which can be used for QuakeML
    # testing
    test_event = basic_test_event()
    test_cat = Catalog()
    test_cat += test_event
    # Write the catalog, read it back in and compare
    test_cat.write("Test_catalog.xml", format='QUAKEML')
    read_cat = read_events("Test_catalog.xml")
    os.remove("Test_catalog.xml")
    self.assertEqual(read_cat[0].resource_id, test_cat[0].resource_id)
    self.assertEqual(read_cat[0].picks, test_cat[0].picks)
    # Note that time_residual_RMS is not a quakeML format
    for key in ('resource_id', 'time', 'longitude', 'latitude', 'depth'):
        self.assertEqual(read_cat[0].origins[0][key],
                         test_cat[0].origins[0][key])
    self.assertEqual(read_cat[0].magnitudes, test_cat[0].magnitudes)
    self.assertEqual(read_cat[0].event_descriptions,
                     test_cat[0].event_descriptions)
    for key in ('resource_id', 'period', 'unit', 'generic_amplitude',
                'pick_id', 'waveform_id'):
        self.assertEqual(read_cat[0].amplitudes[0][key],
                         test_cat[0].amplitudes[0][key])
    # Check the read-write s-file functionality
    sfile = eventtosfile(test_cat[0], userID='TEST', evtype='L',
                         outdir='.', wavefiles='test', explosion=True,
                         overwrite=True)
    del read_cat
    self.assertEqual(readwavename(sfile), ['test'])
    read_cat = Catalog()
    read_cat += readpicks(sfile)
    os.remove(sfile)
    assert_matches_test_cat(read_cat)
    del read_cat
    # Test the wrappers for PICK and EVENTINFO classes, and the
    # conversion back to an event
    picks, evinfo = eventtopick(test_cat)
    conv_cat = Catalog()
    conv_cat.append(picktoevent(evinfo, picks))
    assert_matches_test_cat(conv_cat)
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length,
               swin, prepick=0.05, debug=0, plot=False):
    r"""Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefile found in the pick file.

    :type sfile: string
    :param sfile: sfilename must be the \
        path to a seisan nordic type s-file containing waveform and pick \
        information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
        defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
        defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
        template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
        template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
        defaults file.
    :type prepick: float
    :param prepick: Length to extract prior to the pick in seconds.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template

    .. warning:: This will use whatever data is pointed to in the s-file, if \
        this is not the coninuous data, we recommend using other functions. \
        Differences in processing between short files and day-long files \
        (inherent to resampling) will produce lower cross-correlations.
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')
    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile to find the associated wavefiles
    wavefiles = sfile_util.readwavename(sfile)
    # S-files live in a REA directory; the wavefiles are expected in the
    # parallel WAV tree.
    pathparts = sfile.split('/')[0:-1]
    new_path_parts = ['WAV' if part == 'REA' else part
                      for part in pathparts]
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*new_path_parts) + '/'
    # In case of absolute paths (not handled with .split() --> .join())
    if sfile[0] == '/':
        wavpath = '/' + wavpath
    # Read in waveform data, concatenating all referenced wavefiles
    st = None
    for wavefile in wavefiles:
        print(''.join(["I am going to read waveform data from: ", wavpath,
                       wavefile]))
        if st is None:
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    if st is None:
        # Previously this fell through to a NameError; raise clearly.
        raise IOError('No wavefiles found in s-file: ' + sfile)
    # Refuse to up-sample: it is bad practice for correlations
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked '
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read in pick info
    catalog = sfile_util.readpicks(sfile)
    # Read the list of Picks for this event
    picks = catalog[0].picks
    print("I have found the following picks")
    for pick in picks:
        print(' '.join([pick.waveform_id.station_code,
                        pick.waveform_id.channel_code, pick.phase_hint,
                        str(pick.time)]))
    # Process waveform data: fill gaps, filter and resample
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order,
                                  samp_rate, debug)
    st1 = _template_gen(picks=picks, st=st, length=length, swin=swin,
                        prepick=prepick, plot=plot)
    return st1
def test_read_write(self):
    """Test the round-trip read and write capabilities of sfile_util.

    Writes the full test event out as QuakeML and as a Nordic s-file,
    reads each back in and checks that the attributes survive, then
    checks that eventtosfile rejects deliberately bad input.
    """
    import os
    from obspy.core.event import Catalog
    import obspy
    # readEvents was renamed read_events at obspy 1.0
    if int(obspy.__version__.split('.')[0]) >= 1:
        from obspy.core.event import read_events
    else:
        from obspy.core.event import readEvents as read_events
    # Set-up a test event in a catalogue which can be used for QuakeML
    # testing
    test_event = full_test_event()
    test_cat = Catalog()
    test_cat += test_event
    # Write the catalog, read it back in and compare
    test_cat.write("Test_catalog.xml", format='QUAKEML')
    read_cat = read_events("Test_catalog.xml")
    os.remove("Test_catalog.xml")
    self.assertEqual(read_cat[0].resource_id, test_cat[0].resource_id)
    for i in range(len(read_cat[0].picks)):
        for key in read_cat[0].picks[i].keys():
            # Ignore backazimuth errors and horizontal_slowness_errors
            if key in ['backazimuth_errors', 'horizontal_slowness_errors']:
                continue
            self.assertEqual(read_cat[0].picks[i][key],
                             test_cat[0].picks[i][key])
    # Note that time_residual_RMS is not a quakeML format
    for key in ('resource_id', 'time', 'longitude', 'latitude', 'depth'):
        self.assertEqual(read_cat[0].origins[0][key],
                         test_cat[0].origins[0][key])
    # Check magnitudes and event descriptions
    self.assertEqual(read_cat[0].magnitudes, test_cat[0].magnitudes)
    self.assertEqual(read_cat[0].event_descriptions,
                     test_cat[0].event_descriptions)
    # Check local magnitude amplitude
    for key in ('resource_id', 'period', 'unit', 'generic_amplitude',
                'pick_id', 'waveform_id'):
        self.assertEqual(read_cat[0].amplitudes[0][key],
                         test_cat[0].amplitudes[0][key])
    # Check coda magnitude pick
    for key in ('resource_id', 'type', 'unit', 'generic_amplitude',
                'pick_id', 'waveform_id', 'magnitude_hint', 'snr',
                'category'):
        self.assertEqual(read_cat[0].amplitudes[1][key],
                         test_cat[0].amplitudes[1][key])
    # Check the read-write s-file functionality
    sfile = eventtosfile(test_cat[0], userID='TEST', evtype='L',
                         outdir='.', wavefiles='test', explosion=True,
                         overwrite=True)
    del read_cat
    self.assertEqual(readwavename(sfile), ['test'])
    read_cat = Catalog()
    read_cat += readpicks(sfile)
    os.remove(sfile)
    for i in range(len(read_cat[0].picks)):
        for key in ('time', 'backazimuth', 'onset', 'phase_hint',
                    'polarity'):
            self.assertEqual(read_cat[0].picks[i][key],
                             test_cat[0].picks[i][key])
        self.assertEqual(read_cat[0].picks[i].waveform_id.station_code,
                         test_cat[0].picks[i].waveform_id.station_code)
        # Only the last character of the channel code is guaranteed
        self.assertEqual(
            read_cat[0].picks[i].waveform_id.channel_code[-1],
            test_cat[0].picks[i].waveform_id.channel_code[-1])
    # origin resource_id is not preserved by the s-file round trip.
    # Note that time_residual_RMS is not a quakeML format
    for key in ('time', 'longitude', 'latitude', 'depth'):
        self.assertEqual(read_cat[0].origins[0][key],
                         test_cat[0].origins[0][key])
    for i in range(3):
        for key in ('mag', 'creation_info', 'magnitude_type'):
            self.assertEqual(read_cat[0].magnitudes[i][key],
                             test_cat[0].magnitudes[i][key])
    self.assertEqual(read_cat[0].event_descriptions,
                     test_cat[0].event_descriptions)
    # amplitude resource_id is not preserved by the s-file round trip
    for key in ('period', 'snr'):
        self.assertEqual(read_cat[0].amplitudes[0][key],
                         test_cat[0].amplitudes[0][key])
    # Check coda magnitude pick.  Resource ids and pick ids get
    # overwritten because you can't have two the same in memory, and snr
    # is not supported in s-files, so none of those are compared.
    for key in ('type', 'unit', 'generic_amplitude', 'magnitude_hint',
                'category'):
        self.assertEqual(read_cat[0].amplitudes[1][key],
                         test_cat[0].amplitudes[1][key])
    self.assertEqual(read_cat[0].amplitudes[1].waveform_id.station_code,
                     test_cat[0].amplitudes[1].waveform_id.station_code)
    # s-files hold two-character channel codes: first and last characters
    self.assertEqual(
        read_cat[0].amplitudes[1].waveform_id.channel_code,
        test_cat[0].amplitudes[1].waveform_id.channel_code[0] +
        test_cat[0].amplitudes[1].waveform_id.channel_code[-1])
    del read_cat
    # Test some deliberate failures
    test_cat.append(full_test_event())
    with self.assertRaises(IOError):
        # NOTE(review): only the first call below ever runs - assertRaises
        # exits on the first IOError, so the later cases are never
        # exercised.  TODO: split into separate assertRaises contexts.
        # Raises error due to multiple events in catalog
        sfile = eventtosfile(test_cat, userID='TEST', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
        # Raises error due to too long userID
        sfile = eventtosfile(test_cat[0], userID='TESTICLE', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
        # Raises error due to unrecognised event type
        sfile = eventtosfile(test_cat[0], userID='TEST', evtype='U',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
        # Raises error due to no output directory
        sfile = eventtosfile(test_cat[0], userID='TEST', evtype='L',
                             outdir='albatross', wavefiles='test',
                             explosion=True, overwrite=True)
        # Raises error due to incorrect wavefile formatting
        sfile = eventtosfile(test_cat[0], userID='TEST', evtype='L',
                             outdir='.', wavefiles=1234, explosion=True,
                             overwrite=True)
    with self.assertRaises(IndexError):
        # No origin at all
        invalid_origin = test_cat[0].copy()
        invalid_origin.origins = []
        sfile = eventtosfile(invalid_origin, userID='TEST', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
    with self.assertRaises(ValueError):
        # Origin with no time
        invalid_origin = test_cat[0].copy()
        invalid_origin.origins[0].time = None
        sfile = eventtosfile(invalid_origin, userID='TEST', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
    # Write a near-empty origin: this should succeed
    valid_origin = test_cat[0].copy()
    valid_origin.origins[0].latitude = None
    valid_origin.origins[0].longitude = None
    valid_origin.origins[0].depth = None
    sfile = eventtosfile(valid_origin, userID='TEST', evtype='L',
                         outdir='.', wavefiles='test', explosion=True,
                         overwrite=True)
    self.assertTrue(os.path.isfile(sfile))
    os.remove(sfile)
def test_read_write(self):
    """Test the round-trip read and write capabilities of sfile_util.

    Writes the full test event out as QuakeML and as a Nordic s-file,
    reads each back in and checks that the attributes survive, then
    checks that eventtosfile rejects deliberately bad input.
    """
    import os
    from obspy.core.event import Catalog
    import obspy
    # readEvents was renamed read_events at obspy 1.0
    if int(obspy.__version__.split('.')[0]) >= 1:
        from obspy.core.event import read_events
    else:
        from obspy.core.event import readEvents as read_events
    # Set-up a test event in a catalogue which can be used for QuakeML
    # testing
    test_event = full_test_event()
    test_cat = Catalog()
    test_cat += test_event
    # Write the catalog, read it back in and compare
    test_cat.write("Test_catalog.xml", format='QUAKEML')
    read_cat = read_events("Test_catalog.xml")
    os.remove("Test_catalog.xml")
    self.assertEqual(read_cat[0].resource_id, test_cat[0].resource_id)
    for i in range(len(read_cat[0].picks)):
        for key in read_cat[0].picks[i].keys():
            # Ignore backazimuth errors and horizontal_slowness_errors
            if key in ['backazimuth_errors', 'horizontal_slowness_errors']:
                continue
            self.assertEqual(read_cat[0].picks[i][key],
                             test_cat[0].picks[i][key])
    # Note that time_residual_RMS is not a quakeML format
    for key in ('resource_id', 'time', 'longitude', 'latitude', 'depth'):
        self.assertEqual(read_cat[0].origins[0][key],
                         test_cat[0].origins[0][key])
    # Check magnitudes and event descriptions
    self.assertEqual(read_cat[0].magnitudes, test_cat[0].magnitudes)
    self.assertEqual(read_cat[0].event_descriptions,
                     test_cat[0].event_descriptions)
    # Check local magnitude amplitude
    for key in ('resource_id', 'period', 'unit', 'generic_amplitude',
                'pick_id', 'waveform_id'):
        self.assertEqual(read_cat[0].amplitudes[0][key],
                         test_cat[0].amplitudes[0][key])
    # Check coda magnitude pick
    for key in ('resource_id', 'type', 'unit', 'generic_amplitude',
                'pick_id', 'waveform_id', 'magnitude_hint', 'snr',
                'category'):
        self.assertEqual(read_cat[0].amplitudes[1][key],
                         test_cat[0].amplitudes[1][key])
    # Check the read-write s-file functionality
    sfile = eventtosfile(test_cat[0], userID='TEST', evtype='L',
                         outdir='.', wavefiles='test', explosion=True,
                         overwrite=True)
    del read_cat
    self.assertEqual(readwavename(sfile), ['test'])
    read_cat = Catalog()
    read_cat += readpicks(sfile)
    os.remove(sfile)
    for i in range(len(read_cat[0].picks)):
        for key in ('time', 'backazimuth', 'onset', 'phase_hint',
                    'polarity'):
            self.assertEqual(read_cat[0].picks[i][key],
                             test_cat[0].picks[i][key])
        self.assertEqual(read_cat[0].picks[i].waveform_id.station_code,
                         test_cat[0].picks[i].waveform_id.station_code)
        # Only the last character of the channel code is guaranteed
        self.assertEqual(
            read_cat[0].picks[i].waveform_id.channel_code[-1],
            test_cat[0].picks[i].waveform_id.channel_code[-1])
    # origin resource_id is not preserved by the s-file round trip.
    # Note that time_residual_RMS is not a quakeML format
    for key in ('time', 'longitude', 'latitude', 'depth'):
        self.assertEqual(read_cat[0].origins[0][key],
                         test_cat[0].origins[0][key])
    for i in range(3):
        for key in ('mag', 'creation_info', 'magnitude_type'):
            self.assertEqual(read_cat[0].magnitudes[i][key],
                             test_cat[0].magnitudes[i][key])
    self.assertEqual(read_cat[0].event_descriptions,
                     test_cat[0].event_descriptions)
    # amplitude resource_id is not preserved by the s-file round trip
    for key in ('period', 'snr'):
        self.assertEqual(read_cat[0].amplitudes[0][key],
                         test_cat[0].amplitudes[0][key])
    # Check coda magnitude pick.  Resource ids and pick ids get
    # overwritten because you can't have two the same in memory, and snr
    # is not supported in s-files, so none of those are compared.
    for key in ('type', 'unit', 'generic_amplitude', 'magnitude_hint',
                'category'):
        self.assertEqual(read_cat[0].amplitudes[1][key],
                         test_cat[0].amplitudes[1][key])
    self.assertEqual(read_cat[0].amplitudes[1].waveform_id.station_code,
                     test_cat[0].amplitudes[1].waveform_id.station_code)
    # s-files hold two-character channel codes: first and last characters
    self.assertEqual(
        read_cat[0].amplitudes[1].waveform_id.channel_code,
        test_cat[0].amplitudes[1].waveform_id.channel_code[0] +
        test_cat[0].amplitudes[1].waveform_id.channel_code[-1])
    del read_cat
    # Test some deliberate failures
    test_cat.append(full_test_event())
    with self.assertRaises(IOError):
        # NOTE(review): only the first call below ever runs - assertRaises
        # exits on the first IOError, so the later cases are never
        # exercised.  TODO: split into separate assertRaises contexts.
        # Raises error due to multiple events in catalog
        sfile = eventtosfile(test_cat, userID='TEST', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
        # Raises error due to too long userID
        sfile = eventtosfile(test_cat[0], userID='TESTICLE', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
        # Raises error due to unrecognised event type
        sfile = eventtosfile(test_cat[0], userID='TEST', evtype='U',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
        # Raises error due to no output directory
        sfile = eventtosfile(test_cat[0], userID='TEST', evtype='L',
                             outdir='albatross', wavefiles='test',
                             explosion=True, overwrite=True)
        # Raises error due to incorrect wavefile formatting
        sfile = eventtosfile(test_cat[0], userID='TEST', evtype='L',
                             outdir='.', wavefiles=1234, explosion=True,
                             overwrite=True)
    with self.assertRaises(IndexError):
        # No origin at all
        invalid_origin = test_cat[0].copy()
        invalid_origin.origins = []
        sfile = eventtosfile(invalid_origin, userID='TEST', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
    with self.assertRaises(ValueError):
        # Origin with no time
        invalid_origin = test_cat[0].copy()
        invalid_origin.origins[0].time = None
        sfile = eventtosfile(invalid_origin, userID='TEST', evtype='L',
                             outdir='.', wavefiles='test', explosion=True,
                             overwrite=True)
    # Write a near-empty origin: this should succeed
    valid_origin = test_cat[0].copy()
    valid_origin.origins[0].latitude = None
    valid_origin.origins[0].longitude = None
    valid_origin.origins[0].depth = None
    sfile = eventtosfile(valid_origin, userID='TEST', evtype='L',
                         outdir='.', wavefiles='test', explosion=True,
                         overwrite=True)
    self.assertTrue(os.path.isfile(sfile))
    os.remove(sfile)
def write_correlations(event_list, wavbase, extract_len, pre_pick, shift_len,
                       lowcut=1.0, highcut=10.0, max_sep=4, min_link=8,
                       coh_thresh=0.0, coherence_weight=True, plotvar=False):
    """
    Write a dt.cc file for hypoDD input.

    Takes an input list of events and computes pick refinements by
    cross-correlation, writing corrected differential times to 'dt.cc'
    and a station-uppercased copy to 'dt.cc2'.

    :type event_list: list of tuple
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: str
    :param wavbase: Path to the seisan wave directory that the wavefiles in
        the S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type coh_thresh: float
    :param coh_thresh: Minimum squared cross-correlation to accept a pick.
    :type coherence_weight: bool
    :param coherence_weight: Use coherence to weight the dt.cc file, or the
        raw cross-correlation value, defaults to False which uses the
        cross-correlation value.
    :type plotvar: bool
    :param plotvar: To show the pick-correction plots, defaults to False.

    .. warning:: This is not a fast routine!

    .. warning:: In contrast to seisan's corr routine, but in accordance
        with the hypoDD manual, this outputs corrected differential time.

    .. note:: Currently we have not implemented a method for taking
        unassociated event objects and wavefiles.  As such if you have
        events with associated wavefiles you are advised to generate Sfiles
        for each event using the sfile_util module prior to this step.
    """
    import obspy
    if int(obspy.__version__.split('.')[0]) > 0:
        from obspy.signal.cross_correlation import xcorr_pick_correction
    else:
        # Pre-1.0 obspy used the camelCase name
        from obspy.signal.cross_correlation import xcorrPickCorrection \
            as xcorr_pick_correction
    import matplotlib.pyplot as plt
    from obspy import read
    from eqcorrscan.utils.mag_calc import dist_calc
    import glob
    import warnings
    corr_list = []
    f = open('dt.cc', 'w')
    f2 = open('dt.cc2', 'w')
    for i, master in enumerate(event_list):
        master_sfile = master[1]
        master_event_id = master[0]
        master_picks = sfile_util.readpicks(master_sfile).picks
        master_event = sfile_util.readheader(master_sfile)
        master_ori_time = master_event.origins[0].time
        master_location = (master_event.origins[0].latitude,
                           master_event.origins[0].longitude,
                           master_event.origins[0].depth)
        master_wavefiles = sfile_util.readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + master_wavefiles[0])
        if masterpath:
            masterstream = read(masterpath[0])
        else:
            # BUG-FIX: this raise was previously unreachable (it sat after a
            # continue); without it a missing wavefile crashed later with a
            # NameError on masterstream.
            raise IOError("Couldn't find wavefile")
        if len(master_wavefiles) > 1:
            for wavefile in master_wavefiles:
                try:
                    # BUG-FIX: os.join does not exist, os.path.join intended;
                    # the old AttributeError was swallowed by the bare except
                    # so extra wavefiles were silently never added.
                    masterstream += read(os.path.join(wavbase, wavefile))
                except Exception:
                    # Best-effort: skip extra wavefiles that cannot be read
                    continue
        for j in range(i + 1, len(event_list)):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            slave_event_id = event_list[j][0]
            slave_wavefiles = sfile_util.readwavename(slave_sfile)
            try:
                slavestream = read(wavbase + os.sep + slave_wavefiles[0])
            except Exception:
                raise IOError('No wavefile found: ' + slave_wavefiles[0] +
                              ' ' + slave_sfile)
            if len(slave_wavefiles) > 1:
                for wavefile in slave_wavefiles:
                    try:
                        slavestream += read(wavbase + '/' + wavefile)
                    except Exception:
                        continue
            # Write out the header line
            event_text = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0 \n'
            event_text2 = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0 \n'
            slave_picks = sfile_util.readpicks(slave_sfile).picks
            slave_event = sfile_util.readheader(slave_sfile)
            slave_ori_time = slave_event.origins[0].time
            slave_location = (slave_event.origins[0].latitude,
                              slave_event.origins[0].longitude,
                              slave_event.origins[0].depth)
            if dist_calc(master_location, slave_location) > max_sep:
                continue
            links = 0
            phases = 0
            for pick in master_picks:
                if pick.phase_hint[0].upper() not in ['P', 'S']:
                    # Only use P and S picks, not amplitude or 'other'
                    continue
                # Find station, phase pairs
                # Added by Carolin
                slave_matches = [p for p in slave_picks
                                 if p.phase_hint == pick.phase_hint and
                                 p.waveform_id.station_code ==
                                 pick.waveform_id.station_code]
                if masterstream.select(
                        station=pick.waveform_id.station_code,
                        channel='*' + pick.waveform_id.channel_code[-1]):
                    mastertr = masterstream.select(
                        station=pick.waveform_id.station_code,
                        channel='*' + pick.waveform_id.channel_code[-1])[0]
                else:
                    print('No waveform data for ' +
                          pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code)
                    print(pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code + ' ' +
                          slave_sfile + ' ' + master_sfile)
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(
                            station=slave_pick.waveform_id.station_code,
                            channel='*' +
                            slave_pick.waveform_id.channel_code[-1]):
                        slavetr = slavestream.select(
                            station=slave_pick.waveform_id.station_code,
                            channel='*' +
                            slave_pick.waveform_id.channel_code[-1])[0]
                    else:
                        print('No slave data for ' +
                              slave_pick.waveform_id.station_code + '.' +
                              slave_pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code + ' ' +
                              slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc = xcorr_pick_correction(
                            pick.time, mastertr, slave_pick.time, slavetr,
                            pre_pick, extract_len - pre_pick, shift_len,
                            filter="bandpass",
                            filter_options={'freqmin': lowcut,
                                            'freqmax': highcut},
                            plot=plotvar)
                        # Check that the correction is within the allowed
                        # shift.  This can occur in the obspy routine when
                        # the correlation function is increasing at the end
                        # of the window.
                        if abs(correction) > shift_len:
                            warnings.warn('Shift correction too large, ' +
                                          'will not use')
                            continue
                        # Get the differential travel time using the
                        # corrected time.
                        correction = (pick.time - master_ori_time) -\
                            (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc * cc >= coh_thresh:
                            if coherence_weight:
                                weight = cc * cc
                            else:
                                weight = cc
                            phases += 1
                            # added by Caro
                            event_text += pick.waveform_id.station_code.\
                                ljust(5) + _cc_round(correction, 3).\
                                rjust(11) + _cc_round(weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            event_text2 += pick.waveform_id.station_code\
                                .ljust(5).upper() +\
                                _cc_round(correction, 3).rjust(11) +\
                                _cc_round(weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            corr_list.append(cc * cc)
                    except Exception:
                        msg = "Couldn't compute correlation correction"
                        warnings.warn(msg)
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
                f2.write(event_text2)
    if plotvar:
        plt.hist(corr_list, 150)
        plt.show()
    f.close()
    f2.close()
    return
def Amp_pick_sfile(sfile, datapath, respdir, chans=['Z'], var_wintype=True,
                   winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,
                   highcut=20.0, corners=4):
    """
    Pick amplitudes for local magnitudes from a SEISAN s-file.

    Reads information from a SEISAN s-file, loads the data and the picks,
    cuts the data for the channels given around the S-window, simulates a
    Wood Anderson seismometer, then picks the maximum peak-to-trough
    amplitude.

    Output will be put into a mag_calc.out file which will be in full
    S-file format and can be copied to a REA database.

    :type sfile: string
    :param sfile: Path to the NORDIC format s-file
    :type datapath: string
    :param datapath: Path to the waveform files - usually the path to the
        WAV directory
    :type respdir: string
    :param respdir: Path to the response information directory
    :type chans: List of strings
    :param chans: List of the channels to pick on, defaults to ['Z'] -
        should just be the orientations, e.g. Z,1,2,N,E
    :type var_wintype: bool
    :param var_wintype: If True, the winlen will be multiplied by the P-S
        time if both P and S picks are available, otherwise it will be
        multiplied by the hypocentral distance*0.34 - derived using a p-s
        ratio of 1.68 and S-velocity of 1.5km/s to give a large window,
        defaults to True
    :type winlen: float
    :param winlen: Length of window, see above parameter, if var_wintype is
        False then this will be in seconds, otherwise it is the multiplier
        to the p-s time, defaults to 0.9.
    :type pre_pick: float
    :param pre_pick: Time before the s-pick to start the cut window,
        defaults to 0.2
    :type pre_filt: bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: int
    :param corners: Number of corners to use in the pre-filter

    :returns: list of picks written to mag_calc.out
    """
    # Hardwire a p-s multiplier of hypocentral distance based on p-s ratio
    # of 1.68 and an S-velocity of 1.5km/s, deliberately chosen to be slow.
    ps_multiplier = 0.34
    from eqcorrscan.utils import sfile_util
    from obspy import read
    from scipy.signal import iirfilter
    from obspy.signal.invsim import paz2AmpValueOfFreqResp
    import warnings
    # First we need to work out what stations have what picks
    event = sfile_util.readpicks(sfile)[0]
    # Convert these picks into lists
    stations = []   # List of stations
    channels = []   # List of channels
    picktimes = []  # List of pick times
    picktypes = []  # List of pick types
    distances = []  # List of hypocentral distances
    picks_out = []
    for pick in event.picks:
        if pick.phase_hint in ['P', 'S']:
            # Need to be able to remove this if there isn't data for a
            # station!
            picks_out.append(pick)
            stations.append(pick.waveform_id.station_code)
            channels.append(pick.waveform_id.channel_code)
            picktimes.append(pick.time)
            picktypes.append(pick.phase_hint)
            # BUG-FIX: the comprehension returns a list; the original read
            # .distance straight off that list (AttributeError) - index the
            # first matching arrival instead.
            matched = [arr for arr in event.origins[0].arrivals
                       if arr.pick_id == pick.resource_id]
            distances.append(matched[0].distance)
    # Read in waveforms
    stream = read(datapath + '/' + sfile_util.readwavename(sfile)[0])
    if len(sfile_util.readwavename(sfile)) > 1:
        for wavfile in sfile_util.readwavename(sfile):
            stream += read(datapath + '/' + wavfile)
    stream.merge()  # merge the data, just in case!
    # For each station cut the window
    uniq_stas = list(set(stations))
    for sta in uniq_stas:
        for chan in chans:
            print('Working on ' + sta + ' ' + chan)
            tr = stream.select(station=sta, channel='*' + chan)
            if not tr:
                warnings.warn('There is no station and channel match in '
                              'the wavefile!')
                break
            else:
                tr = tr[0]
            # Apply the pre-filter
            if pre_filt:
                try:
                    tr.detrend('simple')
                except Exception:
                    # Masked (gappy) traces cannot be detrended directly:
                    # split, detrend each piece, then re-merge.
                    dummy = tr.split()
                    dummy.detrend('simple')
                    tr = dummy.merge()[0]
                tr.filter('bandpass', freqmin=lowcut, freqmax=highcut,
                          corners=corners)
            # BUG-FIX: xrange is Python 2 only - use range
            sta_picks = [i for i in range(len(stations))
                         if stations[i] == sta]
            pick_id = event.picks[sta_picks[0]].resource_id
            matched = [arr for arr in event.origins[0].arrivals
                       if arr.pick_id == pick_id]
            # BUG-FIX: index the matched arrival before reading attributes
            hypo_dist = matched[0].distance
            CAZ = matched[0].azimuth
            if var_wintype:
                if 'S' in [picktypes[i] for i in sta_picks] and\
                   'P' in [picktypes[i] for i in sta_picks]:
                    # If there is an S-pick we can use this :D
                    S_pick = min([picktimes[i] for i in sta_picks
                                  if picktypes[i] == 'S'])
                    P_pick = min([picktimes[i] for i in sta_picks
                                  if picktypes[i] == 'P'])
                    try:
                        tr.trim(starttime=S_pick - pre_pick,
                                endtime=S_pick + (S_pick - P_pick) * winlen)
                    except Exception:
                        break
                elif 'S' in [picktypes[i] for i in sta_picks]:
                    # S-pick only: model the P arrival from distance
                    S_pick = min([picktimes[i] for i in sta_picks
                                  if picktypes[i] == 'S'])
                    P_modelled = S_pick - hypo_dist * ps_multiplier
                    try:
                        tr.trim(starttime=S_pick - pre_pick,
                                endtime=S_pick +
                                (S_pick - P_modelled) * winlen)
                    except Exception:
                        break
                else:
                    # In this case we only have a P pick
                    P_pick = min([picktimes[i] for i in sta_picks
                                  if picktypes[i] == 'P'])
                    S_modelled = P_pick + hypo_dist * ps_multiplier
                    try:
                        tr.trim(starttime=S_modelled - pre_pick,
                                endtime=S_modelled +
                                (S_modelled - P_pick) * winlen)
                    except Exception:
                        break
            elif 'S' in [picktypes[i] for i in sta_picks]:
                # Fixed window length, start at the earliest S-pick
                S_pick = min([picktimes[i] for i in sta_picks
                              if picktypes[i] == 'S'])
                try:
                    tr.trim(starttime=S_pick - pre_pick,
                            endtime=S_pick + winlen)
                except Exception:
                    break
            else:
                # No S-pick and fixed window: model an expected S-pick from
                # the hypocentral distance - hand-wavey, no velocity model.
                P_pick = min([picktimes[i] for i in sta_picks
                              if picktypes[i] == 'P'])
                hypo_dist = [distances[i] for i in sta_picks
                             if picktypes[i] == 'P'][0]
                S_modelled = P_pick + hypo_dist * ps_multiplier
                try:
                    tr.trim(starttime=S_modelled - pre_pick,
                            endtime=S_modelled + winlen)
                except Exception:
                    break
            # Find the response information
            resp_info = _find_resp(tr.stats.station, tr.stats.channel,
                                   tr.stats.network, tr.stats.starttime,
                                   tr.stats.delta, respdir)
            PAZ = []
            seedresp = []
            if resp_info and 'gain' in resp_info:
                PAZ = resp_info
            elif resp_info:
                seedresp = resp_info
            # Simulate a Wood Anderson Seismograph
            if PAZ and len(tr.data) > 10:
                # Set ten data points to be the minimum to pass
                tr = _sim_WA(tr, PAZ, None, 10)
            elif seedresp and len(tr.data) > 10:
                tr = _sim_WA(tr, None, seedresp, 10)
            elif len(tr.data) > 10:
                warnings.warn('No PAZ for ' + tr.stats.station + ' ' +
                              tr.stats.channel + ' at time: ' +
                              str(tr.stats.starttime))
                continue
            if len(tr.data) <= 10:
                # Should remove the P and S picks if len(tr.data)==0
                warnings.warn('No data found for: ' + tr.stats.station)
                break
            # Get the amplitude
            amplitude, period, delay = _max_p2t(tr.data, tr.stats.delta)
            if amplitude == 0.0:
                break
            print('Amplitude picked: ' + str(amplitude))
            # Note, amplitude should be in meters at the moment!
            # Remove the pre-filter response
            if pre_filt:
                # Generate poles and zeros for the filter we used earlier:
                # this is how the filter is designed in the convenience
                # methods of filtering in obspy.
                z, p, k = iirfilter(
                    corners,
                    [lowcut / (0.5 * tr.stats.sampling_rate),
                     highcut / (0.5 * tr.stats.sampling_rate)],
                    btype='band', ftype='butter', output='zpk')
                filt_paz = {'poles': list(p), 'zeros': list(z), 'gain': k,
                            'sensitivity': 1.0}
                amplitude /= (paz2AmpValueOfFreqResp(filt_paz, 1 / period) *
                              filt_paz['sensitivity'])
            # Convert amplitude to mm
            if PAZ:
                # Divide by Gain to get to nm (returns pm? 10^-12)
                amplitude /= 1000
            if seedresp:
                # Seedresp method returns mm
                amplitude *= 1000000
            # Write out the half amplitude, approximately the peak amplitude
            # as used directly in magnitude calculations.
            # Page 343 of Seisan manual:
            # Amplitude (Zero-Peak) in units of nm, nm/s, nm/s^2 or counts
            amplitude *= 0.5
            # Generate a PICK type object for this pick
            picks_out.append(sfile_util.PICK(
                station=tr.stats.station, channel=tr.stats.channel,
                impulsivity=' ', phase='IAML', weight='', polarity=' ',
                time=tr.stats.starttime + delay, coda=999,
                amplitude=amplitude, peri=period, azimuth=float('NaN'),
                velocity=float('NaN'), AIN=999, SNR='', azimuthres=999,
                timeres=float('NaN'), finalweight=999, distance=hypo_dist,
                CAZ=CAZ))
    # Copy the header from the sfile to a new local S-file
    fin = open(sfile, 'r')
    fout = open('mag_calc.out', 'w')
    for line in fin:
        if not line[79] == '7':
            fout.write(line)
        else:
            fout.write(line)
            break
    fin.close()
    for pick in picks_out:
        fout.write(pick)  # Note this uses the legacy pick class
    fout.close()
    # Write picks out to new s-file
    for pick in picks_out:
        print(pick)
    # sfile_util.populatesfile('mag_calc.out', picks_out)
    return picks_out
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,
               prepick=0.05, debug=0, plot=False):
    r"""Cut a template from the wavefile(s) referenced by a seisan s-file.

    Reads the picks from the s-file, loads the associated waveform data
    from the parallel WAV directory, processes it and cuts the template
    around the picks.

    :type sfile: string
    :param sfile: sfilename must be the path to a seisan nordic type
        s-file containing waveform and pick information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template
        defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template
        defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in
        template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in template
        defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template
        defaults file.
    :type prepick: float
    :param prepick: Length to extract prior to the pick in seconds.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.Stream Newly cut template

    .. warning:: This will use whatever data is pointed to in the s-file,
        if this is not the coninuous data, we recommend using other
        functions.  Differences in processing between short files and
        day-long files (inherent to resampling) will produce lower
        cross-correlations.
    """
    import os
    # Fail early if the s-file cannot be found
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')
    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    from obspy import read as obsread
    # Wavefiles named in the header of the s-file
    wavefiles = sfile_util.readwavename(sfile)
    # The WAV directory sits parallel to the REA one: swap the path element
    directory_parts = ['WAV' if part == 'REA' else part
                       for part in sfile.split('/')[0:-1]]
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*directory_parts) + '/'
    # os.path.join drops a leading separator - restore it for absolute paths
    if sfile[0] == '/':
        wavpath = '/' + wavpath
    # Accumulate all referenced wavefiles into a single stream
    st = None
    for wavefile in wavefiles:
        print(''.join(["I am going to read waveform data from: ", wavpath,
                       wavefile]))
        if st is None:
            st = obsread(wavpath + wavefile)
        else:
            st += obsread(wavpath + wavefile)
    # Refuse to upsample: that degrades correlation quality
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked ' +
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " + str(tr.stats.sampling_rate))
    # Read the list of Picks for this event
    catalog = sfile_util.readpicks(sfile)
    picks = catalog[0].picks
    print("I have found the following picks")
    for pick in picks:
        print(' '.join([pick.waveform_id.station_code,
                        pick.waveform_id.channel_code,
                        pick.phase_hint, str(pick.time)]))
    # Process waveform data, then cut the template around the picks
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order,
                                  samp_rate, debug)
    return _template_gen(picks=picks, st=st, length=length, swin=swin,
                         prepick=prepick, plot=plot, debug=debug)
def write_correlations(event_list, wavbase, extract_len, pre_pick, shift_len,
                       lowcut=1.0, highcut=10.0, max_sep=8, min_link=8,
                       cc_thresh=0.0, plotvar=False, debug=0):
    """
    Write a dt.cc file for hypoDD input for a given list of events.

    Takes an input list of events and computes pick refinements by
    correlation.  Outputs two files, dt.cc and dt.cc2, each provides a
    different weight, dt.cc uses weights of the cross-correlation, and
    dt.cc2 provides weights as the square of the cross-correlation.

    :type event_list: list
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: str
    :param wavbase: Path to the seisan wave directory that the wavefiles in
        the S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum separation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type cc_thresh: float
    :param cc_thresh: Threshold to include cross-correlation results.
    :type plotvar: bool
    :param plotvar: To show the pick-correction plots, defaults to False.
    :type debug: int
    :param debug: Variable debug levels from 0-5, higher=more output.

    .. warning:: This is not a fast routine!

    .. warning:: In contrast to seisan's corr routine, but in accordance
        with the hypoDD manual, this outputs corrected differential time.

    .. note:: Currently we have not implemented a method for taking
        unassociated event objects and wavefiles.  As such if you have
        events with associated wavefiles you are advised to generate Sfiles
        for each event using the sfile_util module prior to this step.

    .. note:: There is no provision to taper waveforms within these
        functions, if you desire this functionality, you should apply the
        taper before calling this.  Note the :func:`obspy.Trace.taper`
        functions.
    """
    from obspy.signal.cross_correlation import xcorr_pick_correction
    warnings.filterwarnings(action="ignore",
                            message="Maximum of cross correlation " +
                            "lower than 0.8: *")
    corr_list = []
    f = open('dt.cc', 'w')
    f2 = open('dt.cc2', 'w')
    k_events = len(list(event_list))
    for i, master in enumerate(event_list):
        master_sfile = master[1]
        if debug > 1:
            print('Computing correlations for master: %s' % master_sfile)
        master_event_id = master[0]
        master_picks = sfile_util.readpicks(master_sfile).picks
        master_event = sfile_util.readheader(master_sfile)
        master_ori_time = master_event.origins[0].time
        master_location = (master_event.origins[0].latitude,
                           master_event.origins[0].longitude,
                           master_event.origins[0].depth / 1000.0)
        master_wavefiles = sfile_util.readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + master_wavefiles[0])
        if masterpath:
            masterstream = read(masterpath[0])
        else:
            # BUG-FIX: fail loudly when the first master wavefile is missing
            # rather than hitting a NameError on masterstream later.
            raise IOError("Couldn't find wavefile")
        if len(master_wavefiles) > 1:
            for wavefile in master_wavefiles:
                try:
                    # BUG-FIX: os.join does not exist - os.path.join intended
                    masterstream += read(os.path.join(wavbase, wavefile))
                except IOError:
                    # BUG-FIX: the old code raised here, leaving the continue
                    # unreachable; skip-with-notice matches the slave loop.
                    print('No waveform found: %s' %
                          (os.path.join(wavbase, wavefile)))
                    continue
        for j in range(i + 1, k_events):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            if debug > 2:
                print('Comparing to event: %s' % slave_sfile)
            slave_event_id = event_list[j][0]
            slave_wavefiles = sfile_util.readwavename(slave_sfile)
            try:
                slavestream = read(wavbase + os.sep + slave_wavefiles[0])
            except Exception:
                raise IOError('No wavefile found: ' + slave_wavefiles[0] +
                              ' ' + slave_sfile)
            if len(slave_wavefiles) > 1:
                for wavefile in slave_wavefiles:
                    try:
                        slavestream += read(wavbase + os.sep + wavefile)
                    except IOError:
                        print('No waveform found: %s' %
                              (wavbase + os.sep + wavefile))
                        continue
            # Write out the header line
            event_text = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0 \n'
            event_text2 = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0 \n'
            slave_picks = sfile_util.readpicks(slave_sfile).picks
            slave_event = sfile_util.readheader(slave_sfile)
            slave_ori_time = slave_event.origins[0].time
            slave_location = (slave_event.origins[0].latitude,
                              slave_event.origins[0].longitude,
                              slave_event.origins[0].depth / 1000.0)
            if dist_calc(master_location, slave_location) > max_sep:
                if debug > 0:
                    print('Seperation exceeds max_sep: %s' %
                          (dist_calc(master_location, slave_location)))
                continue
            links = 0
            phases = 0
            for pick in master_picks:
                if not hasattr(pick, 'phase_hint') or \
                        len(pick.phase_hint) == 0:
                    warnings.warn('No phase-hint for pick:')
                    print(pick)
                    continue
                if pick.phase_hint[0].upper() not in ['P', 'S']:
                    # Only use P and S picks, not amplitude or 'other'
                    warnings.warn('Will only use P or S phase picks')
                    print(pick)
                    continue
                # Find station, phase pairs
                # Added by Carolin
                slave_matches = [
                    p for p in slave_picks
                    if hasattr(p, 'phase_hint') and
                    p.phase_hint == pick.phase_hint and
                    p.waveform_id.station_code ==
                    pick.waveform_id.station_code]
                if masterstream.select(
                        station=pick.waveform_id.station_code,
                        channel='*' + pick.waveform_id.channel_code[-1]):
                    mastertr = masterstream.select(
                        station=pick.waveform_id.station_code,
                        channel='*' + pick.waveform_id.channel_code[-1])[0]
                else:
                    # BUG-FIX: this was 'elif debug > 1', so with low debug
                    # the break was skipped and a stale mastertr re-used.
                    if debug > 1:
                        print('No waveform data for ' +
                              pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code + ' ' +
                              slave_sfile + ' ' + master_sfile)
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    if slavestream.select(
                            station=slave_pick.waveform_id.station_code,
                            channel='*' +
                            slave_pick.waveform_id.channel_code[-1]):
                        slavetr = slavestream.select(
                            station=slave_pick.waveform_id.station_code,
                            channel='*' +
                            slave_pick.waveform_id.channel_code[-1])[0]
                    else:
                        print('No slave data for ' +
                              slave_pick.waveform_id.station_code + '.' +
                              slave_pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code + ' ' +
                              slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc = xcorr_pick_correction(
                            pick.time, mastertr, slave_pick.time, slavetr,
                            pre_pick, extract_len - pre_pick, shift_len,
                            filter="bandpass",
                            filter_options={'freqmin': lowcut,
                                            'freqmax': highcut},
                            plot=plotvar)
                        # Check that the correction is within the allowed
                        # shift.  This can occur in the obspy routine when
                        # the correlation function is increasing at the end
                        # of the window.
                        if abs(correction) > shift_len:
                            warnings.warn('Shift correction too large, ' +
                                          'will not use')
                            continue
                        # Get the differential travel time using the
                        # corrected time.
                        correction = (pick.time - master_ori_time) -\
                            (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc >= cc_thresh:
                            weight = cc
                            phases += 1
                            # added by Caro
                            event_text += pick.waveform_id.station_code.\
                                ljust(5) + _cc_round(correction, 3).\
                                rjust(11) + _cc_round(weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            event_text2 += pick.waveform_id.station_code\
                                .ljust(5) + _cc_round(correction, 3).\
                                rjust(11) +\
                                _cc_round(weight * weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            if debug > 3:
                                print(event_text)
                        else:
                            print('cc too low: %s' % cc)
                        corr_list.append(cc * cc)
                    except Exception:
                        msg = "Couldn't compute correlation correction"
                        warnings.warn(msg)
                        continue
            if links >= min_link and phases > 0:
                f.write(event_text)
                f2.write(event_text2)
    if plotvar:
        plt.hist(corr_list, 150)
        plt.show()
    f.close()
    f2.close()
    return
def from_sfile(sfile, lowcut, highcut, samp_rate, filt_order, length, swin,
               prepick=0.05, debug=0, plot=False):
    """
    Generate multiplexed template from a Nordic (Seisan) s-file.

    Function to read in picks from sfile then generate the template from \
    the picks within this and the wavefile found in the pick file.

    :type sfile: str
    :param sfile: sfilename must be the \
        path to a seisan nordic type s-file containing waveform and pick \
        information.
    :type lowcut: float
    :param lowcut: Low cut (Hz), if set to None will look in template \
        defaults file
    :type highcut: float
    :param highcut: High cut (Hz), if set to None will look in template \
        defaults file
    :type samp_rate: float
    :param samp_rate: New sampling rate in Hz, if set to None will look in \
        template defaults file
    :type filt_order: int
    :param filt_order: Filter level, if set to None will look in \
        template defaults file
    :type swin: str
    :param swin: Either 'all', 'P' or 'S', to select which phases to output.
    :type length: float
    :param length: Extract length in seconds, if None will look in template \
        defaults file.
    :type prepick: float
    :param prepick: Length to extract prior to the pick in seconds.
    :type debug: int
    :param debug: Debug level, higher number=more output.
    :type plot: bool
    :param plot: Turns template plotting on or off.

    :returns: obspy.core.stream.Stream Newly cut template

    .. warning:: This will use whatever data is pointed to in the s-file, if \
        this is not the coninuous data, we recommend using other functions. \
        Differences in processing between short files and day-long files \
        (inherent to resampling) will produce lower cross-correlations.

    .. rubric:: Example

    >>> from eqcorrscan.core.template_gen import from_sfile
    >>> sfile = 'eqcorrscan/tests/test_data/REA/TEST_/01-0411-15L.S201309'
    >>> template = from_sfile(sfile=sfile, lowcut=5.0, highcut=15.0,
    ...                       samp_rate=50.0, filt_order=4, swin='P',
    ...                       prepick=0.2, length=6)
    >>> print(len(template))
    15
    >>> print(template[0].stats.sampling_rate)
    50.0
    >>> template.plot(equal_scale=False, size=(800,600)) # doctest: +SKIP

    .. plot::

        from eqcorrscan.core.template_gen import from_sfile
        import os
        sfile = os.path.realpath('../../..') + \
            '/tests/test_data/REA/TEST_/01-0411-15L.S201309'
        template = from_sfile(sfile=sfile, lowcut=5.0, highcut=15.0,
                              samp_rate=50.0, filt_order=4, swin='P',
                              prepick=0.2, length=6)
        template.plot(equal_scale=False, size=(800, 600))
    """
    # Perform some checks first
    import os
    if not os.path.isfile(sfile):
        raise IOError('sfile does not exist')
    from eqcorrscan.utils import pre_processing
    from eqcorrscan.utils import sfile_util
    from obspy import read as obsread
    # Read in the header of the sfile
    wavefiles = sfile_util.readwavename(sfile)
    # The WAV (waveform) directory sits parallel to the REA (pick) database:
    # swap the REA path element for WAV to find the wavefiles.
    pathparts = sfile.split('/')[0:-1]
    new_path_parts = []
    for part in pathparts:
        if part == 'REA':
            part = 'WAV'
        new_path_parts.append(part)
    # mainwav is the WAV root itself (truncate the path at the WAV element),
    # used as a last-resort location for the wavefile below.
    main_wav_parts = []
    for part in new_path_parts:
        main_wav_parts.append(part)
        if part == 'WAV':
            break
    mainwav = os.path.join(*main_wav_parts) + os.path.sep
    # * argument to allow .join() to accept a list
    wavpath = os.path.join(*new_path_parts) + os.path.sep
    # In case of absolute paths (not handled with .split() --> .join())
    if sfile[0] == os.path.sep:
        wavpath = os.path.sep + wavpath
        mainwav = os.path.sep + mainwav
    # Read in waveform file: try the event's WAV sub-directory first, then a
    # path relative to the working directory, then the main WAV directory.
    for wavefile in wavefiles:
        if debug > 0:
            print(''.join(["I am going to read waveform data from: ",
                           wavpath, wavefile]))
        # 'st' only exists once the first wavefile has been read; the
        # locals() test selects create-vs-append accordingly.
        if 'st' not in locals():
            if os.path.isfile(wavpath + wavefile):
                st = obsread(wavpath + wavefile)
            elif os.path.isfile(wavefile):
                st = obsread(wavefile)
            else:
                # Read from the main WAV directory
                st = obsread(mainwav + wavefile)
        else:
            if os.path.isfile(wavpath + wavefile):
                st += obsread(wavpath + wavefile)
            elif os.path.isfile(wavefile):
                st += obsread(wavefile)
            else:
                st += obsread(mainwav + wavefile)
    # Refuse to upsample: that would degrade correlation quality
    for tr in st:
        if tr.stats.sampling_rate < samp_rate:
            print('Sampling rate of data is lower than sampling rate asked ' +
                  'for')
            print('Not good practice for correlations: I will not do this')
            raise ValueError("Trace: " + tr.stats.station +
                             " sampling rate: " +
                             str(tr.stats.sampling_rate))
    # Read in pick info
    event = sfile_util.readpicks(sfile)
    # Read the list of Picks for this event
    picks = event.picks
    if debug > 0:
        print("I have found the following picks")
        for pick in picks:
            print(' '.join([pick.waveform_id.station_code,
                            pick.waveform_id.channel_code,
                            pick.phase_hint, str(pick.time)]))
    # Process waveform data
    st.merge(fill_value='interpolate')
    st = pre_processing.shortproc(st, lowcut, highcut, filt_order,
                                  samp_rate, debug)
    st1 = _template_gen(picks=picks, st=st, length=length, swin=swin,
                        prepick=prepick, plot=plot, debug=debug)
    return st1
def amp_pick_sfile(sfile, datapath, respdir, chans=None, var_wintype=True,
                   winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,
                   highcut=20.0, corners=4):
    """
    Pick amplitudes for local magnitudes from NORDIC s-files.

    Reads information from a SEISAN s-file, loads the data and the \
    picks, cuts the data for the channels given around the S-window, \
    simulates a Wood Anderson seismometer, then picks the maximum \
    peak-to-trough amplitude.

    Output will be put into a mag_calc.out file which will be in full \
    S-file format and can be copied to a REA database.

    :type sfile: str
    :param sfile: Path to NORDIC format s-file
    :type datapath: str
    :param datapath: Path to the waveform files - usually the path to the \
        WAV directory
    :type respdir: str
    :param respdir: Path to the response information directory
    :type chans: list
    :param chans: List of the channels to pick on, defaults to ['Z'] - \
        should just be the orientations, e.g. Z,1,2,N,E
    :type var_wintype: bool
    :param var_wintype: If True, the winlen will be multiplied by the P-S \
        time if both P and S picks are available, otherwise it will be \
        multiplied by the hypocentral distance*0.34 - derived using a p-s \
        ratio of 1.68 and S-velocity of 1.5km/s to give a large window, \
        defaults to True
    :type winlen: float
    :param winlen: Length of window, see above parameter, if var_wintype \
        is False then this will be in seconds, otherwise it is the \
        multiplier to the p-s time, defaults to 0.9.
    :type pre_pick: float
    :param pre_pick: Time before the s-pick to start the cut window, \
        defaults to 0.2
    :type pre_filt: bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: int
    :param corners: Number of corners to use in the pre-filter

    :returns: obspy.core.event
    """
    import os
    import shutil
    from obspy import read
    from eqcorrscan.utils import sfile_util
    # Fix for the mutable-default-argument trap: resolve ['Z'] per call.
    if chans is None:
        chans = ['Z']
    # First we need to work out what stations have what picks
    event = sfile_util.readpicks(sfile)
    # Read in waveforms.  Hoist readwavename so the s-file is parsed once,
    # and do not re-read wavfiles[0] in the loop (the original read the
    # first wavefile twice and relied on merge() to clean up).
    wavfiles = sfile_util.readwavename(sfile)
    stream = read(os.path.join(datapath, wavfiles[0]))
    for wavfile in wavfiles[1:]:
        stream += read(os.path.join(datapath, wavfile))
    stream.merge()  # merge the data, just in case!
    # NOTE(review): amp_pick_event presumably adds amplitude picks to
    # event in place - the return value is kept for parity with the
    # original, but `event` is what is written out and returned.
    event_picked = amp_pick_event(event=event, st=stream, respdir=respdir,
                                  chans=chans, var_wintype=var_wintype,
                                  winlen=winlen, pre_pick=pre_pick,
                                  pre_filt=pre_filt, lowcut=lowcut,
                                  highcut=highcut, corners=corners)
    new_sfile = sfile_util.eventtosfile(event=event, userID=str('EQCO'),
                                        evtype=str('L'), outdir=str('.'),
                                        wavefiles=wavfiles)
    shutil.move(new_sfile, 'mag_calc.out')
    return event
def amp_pick_sfile(sfile, datapath, respdir, chans=None, var_wintype=True,
                   winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,
                   highcut=20.0, corners=4):
    """
    Pick amplitudes for local magnitudes from NORDIC s-files.

    .. note:: This definition duplicates an earlier ``amp_pick_sfile`` in
        this module and shadows it at import time - the duplicate should
        be removed once callers are confirmed.

    Reads information from a SEISAN s-file, loads the data and the \
    picks, cuts the data for the channels given around the S-window, \
    simulates a Wood Anderson seismometer, then picks the maximum \
    peak-to-trough amplitude.

    Output will be put into a mag_calc.out file which will be in full \
    S-file format and can be copied to a REA database.

    :type sfile: str
    :param sfile: Path to NORDIC format s-file
    :type datapath: str
    :param datapath: Path to the waveform files - usually the path to the \
        WAV directory
    :type respdir: str
    :param respdir: Path to the response information directory
    :type chans: list
    :param chans: List of the channels to pick on, defaults to ['Z'] - \
        should just be the orientations, e.g. Z,1,2,N,E
    :type var_wintype: bool
    :param var_wintype: If True, the winlen will be multiplied by the P-S \
        time if both P and S picks are available, otherwise it will be \
        multiplied by the hypocentral distance*0.34 - derived using a p-s \
        ratio of 1.68 and S-velocity of 1.5km/s to give a large window, \
        defaults to True
    :type winlen: float
    :param winlen: Length of window, see above parameter, if var_wintype \
        is False then this will be in seconds, otherwise it is the \
        multiplier to the p-s time, defaults to 0.9.
    :type pre_pick: float
    :param pre_pick: Time before the s-pick to start the cut window, \
        defaults to 0.2
    :type pre_filt: bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: int
    :param corners: Number of corners to use in the pre-filter

    :returns: obspy.core.event
    """
    import os
    import shutil
    from obspy import read
    from eqcorrscan.utils import sfile_util
    # Fix for the mutable-default-argument trap: resolve ['Z'] per call.
    if chans is None:
        chans = ['Z']
    # First we need to work out what stations have what picks
    event = sfile_util.readpicks(sfile)
    # Read in waveforms.  Hoist readwavename so the s-file is parsed once,
    # and do not re-read wavfiles[0] in the loop (the original read the
    # first wavefile twice and relied on merge() to clean up).
    wavfiles = sfile_util.readwavename(sfile)
    stream = read(os.path.join(datapath, wavfiles[0]))
    for wavfile in wavfiles[1:]:
        stream += read(os.path.join(datapath, wavfile))
    stream.merge()  # merge the data, just in case!
    # NOTE(review): amp_pick_event presumably adds amplitude picks to
    # event in place - the return value is kept for parity with the
    # original, but `event` is what is written out and returned.
    event_picked = amp_pick_event(event=event, st=stream, respdir=respdir,
                                  chans=chans, var_wintype=var_wintype,
                                  winlen=winlen, pre_pick=pre_pick,
                                  pre_filt=pre_filt, lowcut=lowcut,
                                  highcut=highcut, corners=corners)
    new_sfile = sfile_util.eventtosfile(event=event, userID=str('EQCO'),
                                        evtype=str('L'), outdir=str('.'),
                                        wavefiles=wavfiles)
    shutil.move(new_sfile, 'mag_calc.out')
    return event
def write_correlations(event_list, wavbase, extract_len, pre_pick, shift_len,
                       lowcut=1.0, highcut=10.0, max_sep=4, min_link=8,
                       coh_thresh=0.0, coherence_weight=True, plotvar=False):
    """
    Write a dt.cc file for hypoDD input.

    Takes an input list of events and computes pick refinements by
    cross-correlation.  Corrected differential times are written to
    ``dt.cc`` and, with upper-cased station codes, to ``dt.cc2``.

    :type event_list: list of tuple
    :param event_list: List of tuples of event_id (int) and sfile (String)
    :type wavbase: str
    :param wavbase: Path to the seisan wave directory that the wavefiles \
        in the S-files are stored
    :type extract_len: float
    :param extract_len: Length in seconds to extract around the pick
    :type pre_pick: float
    :param pre_pick: Time before the pick to start the correlation window
    :type shift_len: float
    :param shift_len: Time to allow pick to vary
    :type lowcut: float
    :param lowcut: Lowcut in Hz - default=1.0
    :type highcut: float
    :param highcut: Highcut in Hz - default=10.0
    :type max_sep: float
    :param max_sep: Maximum seperation between event pairs in km
    :type min_link: int
    :param min_link: Minimum links for an event to be paired
    :type coh_thresh: float
    :param coh_thresh: Minimum squared cross-correlation for a pick to be \
        written out
    :type coherence_weight: bool
    :param coherence_weight: Use coherence to weight the dt.cc file, or \
        the raw cross-correlation value, defaults to false which uses the \
        cross-correlation value.
    :type plotvar: bool
    :param plotvar: To show the pick-correction plots, defualts to False.

    .. warning:: This is not a fast routine!

    .. warning:: In contrast to seisan's corr routine, but in accordance \
        with the hypoDD manual, this outputs corrected differential time.

    .. note:: Currently we have not implemented a method for taking \
        unassociated event objects and wavefiles.  As such if you have \
        events with associated wavefiles you are advised to generate \
        Sfiles for each event using the sfile_util module prior to this \
        step.
    """
    import obspy
    if int(obspy.__version__.split('.')[0]) > 0:
        from obspy.signal.cross_correlation import xcorr_pick_correction
    else:
        from obspy.signal.cross_correlation import xcorrPickCorrection \
            as xcorr_pick_correction
    import matplotlib.pyplot as plt
    from obspy import read
    from eqcorrscan.utils.mag_calc import dist_calc
    import glob
    import warnings
    corr_list = []
    f = open('dt.cc', 'w')
    f2 = open('dt.cc2', 'w')
    for i, master in enumerate(event_list):
        master_sfile = master[1]
        master_event_id = master[0]
        master_picks = sfile_util.readpicks(master_sfile).picks
        master_event = sfile_util.readheader(master_sfile)
        master_ori_time = master_event.origins[0].time
        master_location = (master_event.origins[0].latitude,
                           master_event.origins[0].longitude,
                           master_event.origins[0].depth)
        master_wavefiles = sfile_util.readwavename(master_sfile)
        masterpath = glob.glob(wavbase + os.sep + master_wavefiles[0])
        # BUG FIX: originally the IOError was unreachable (placed after a
        # bare `continue`) and an empty glob fell through to a NameError on
        # masterstream - fail fast and clearly instead.
        if not masterpath:
            raise IOError("Couldn't find wavefile")
        masterstream = read(masterpath[0])
        # BUG FIX: was `os.join` (AttributeError, silently swallowed by a
        # bare except) and the loop re-read master_wavefiles[0].
        for wavefile in master_wavefiles[1:]:
            try:
                masterstream += read(os.path.join(wavbase, wavefile))
            except Exception:
                continue
        for j in range(i + 1, len(event_list)):
            # Use this tactic to only output unique event pairings
            slave_sfile = event_list[j][1]
            slave_event_id = event_list[j][0]
            slave_wavefiles = sfile_util.readwavename(slave_sfile)
            try:
                slavestream = read(wavbase + os.sep + slave_wavefiles[0])
            except Exception:
                raise IOError('No wavefile found: ' + slave_wavefiles[0] +
                              ' ' + slave_sfile)
            # As above: skip the already-read first wavefile.
            for wavefile in slave_wavefiles[1:]:
                try:
                    slavestream += read(wavbase + '/' + wavefile)
                except Exception:
                    continue
            # Write out the header line
            event_text = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0 \n'
            event_text2 = '#' + str(master_event_id).rjust(10) +\
                str(slave_event_id).rjust(10) + ' 0.0 \n'
            slave_picks = sfile_util.readpicks(slave_sfile).picks
            slave_event = sfile_util.readheader(slave_sfile)
            slave_ori_time = slave_event.origins[0].time
            slave_location = (slave_event.origins[0].latitude,
                              slave_event.origins[0].longitude,
                              slave_event.origins[0].depth)
            if dist_calc(master_location, slave_location) > max_sep:
                continue
            links = 0
            phases = 0
            for pick in master_picks:
                if pick.phase_hint[0].upper() not in ['P', 'S']:
                    # Only use P and S picks, not amplitude or 'other'
                    continue
                # Find station, phase pairs
                slave_matches = [p for p in slave_picks
                                 if p.phase_hint == pick.phase_hint and
                                 p.waveform_id.station_code ==
                                 pick.waveform_id.station_code]
                # Select once, re-use (the original called select twice).
                master_select = masterstream.select(
                    station=pick.waveform_id.station_code,
                    channel='*' + pick.waveform_id.channel_code[-1])
                if master_select:
                    mastertr = master_select[0]
                else:
                    print('No waveform data for ' +
                          pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code)
                    print(pick.waveform_id.station_code + '.' +
                          pick.waveform_id.channel_code + ' ' +
                          slave_sfile + ' ' + master_sfile)
                    # NOTE(review): break (not continue) stops processing
                    # all remaining master picks for this pair - kept as
                    # in the original; confirm this is intended.
                    break
                # Loop through the matches
                for slave_pick in slave_matches:
                    slave_select = slavestream.select(
                        station=slave_pick.waveform_id.station_code,
                        channel='*' +
                        slave_pick.waveform_id.channel_code[-1])
                    if slave_select:
                        slavetr = slave_select[0]
                    else:
                        print('No slave data for ' +
                              slave_pick.waveform_id.station_code + '.' +
                              slave_pick.waveform_id.channel_code)
                        print(pick.waveform_id.station_code + '.' +
                              pick.waveform_id.channel_code + ' ' +
                              slave_sfile + ' ' + master_sfile)
                        break
                    # Correct the picks
                    try:
                        correction, cc = xcorr_pick_correction(
                            pick.time, mastertr, slave_pick.time, slavetr,
                            pre_pick, extract_len - pre_pick, shift_len,
                            filter="bandpass",
                            filter_options={'freqmin': lowcut,
                                            'freqmax': highcut},
                            plot=plotvar)
                        # Get the differntial travel time using the
                        # corrected time.
                        # Check that the correction is within the allowed
                        # shift.  This can occur in the obspy routine when
                        # the correlation function is increasing at the
                        # end of the window.
                        if abs(correction) > shift_len:
                            warnings.warn('Shift correction too large, ' +
                                          'will not use')
                            continue
                        correction = (pick.time - master_ori_time) -\
                            (slave_pick.time + correction - slave_ori_time)
                        links += 1
                        if cc * cc >= coh_thresh:
                            if coherence_weight:
                                weight = cc * cc
                            else:
                                weight = cc
                            phases += 1
                            event_text += pick.waveform_id.station_code.\
                                ljust(5) + _cc_round(correction, 3).\
                                rjust(11) + _cc_round(weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            event_text2 += pick.waveform_id.station_code\
                                .ljust(5).upper() +\
                                _cc_round(correction, 3).rjust(11) +\
                                _cc_round(weight, 3).rjust(8) +\
                                ' ' + pick.phase_hint + '\n'
                            corr_list.append(cc * cc)
                    except Exception:
                        msg = "Couldn't compute correlation correction"
                        warnings.warn(msg)
                        continue
            # Only write pairs with enough accepted links.
            if links >= min_link and phases > 0:
                f.write(event_text)
                f2.write(event_text2)
    if plotvar:
        plt.hist(corr_list, 150)
        plt.show()
    f.close()
    f2.close()
    return