def test_issue193(self):
    """
    Test for issue #193: ensure a non-contiguous array is written
    correctly.
    """
    warnings.filterwarnings("ignore", "Detected non contiguous data")
    # test all plugins with both read and write method
    formats_write = \
        set(_getEntryPoints('obspy.plugin.waveform', 'writeFormat'))
    formats_read = \
        set(_getEntryPoints('obspy.plugin.waveform', 'readFormat'))
    formats = set.intersection(formats_write, formats_read)
    # mseed will raise exception for int64 data, thus use int32 only
    data = np.arange(10, dtype='int32')
    # make array non-contiguous
    data = data[::2]
    tr = Trace(data=data)
    for format in formats:
        # XXX: skip SEGY and SU formats for now as they need some special
        # headers.
        if format in ['SEGY', 'SU', 'SEG2']:
            continue
        tempfile = NamedTemporaryFile().name
        tr.write(tempfile, format)
        if format == "Q":
            tempfile = tempfile + ".QHD"
        tr_test = read(tempfile, format)[0]
        # clean up
        os.remove(tempfile)
        if format == 'Q':
            os.remove(tempfile[:-4] + '.QBN')
            os.remove(tempfile[:-4])
        np.testing.assert_array_equal(tr.data, tr_test.data)
def normalization(datapath):
    filepath = datapath + '/predict/syn/Z/'
    for i in tqdm(range(300), desc='processing'):
        st = read(filepath + str(i) + '.sac')
        # scale each trace to a peak absolute amplitude of 1
        tr = st[0].data / np.max(abs(st[0].data))
        sacfile = Trace()
        sacfile.data = tr[:]
        sacfile.write(filepath + str(i) + ".sac", format="SAC")
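# Hedged usage sketch for normalization() above (not part of the original
# source). normalization() expects datapath + '/predict/syn/Z/' to hold
# files named 0.sac ... 299.sac; the '/tmp/demo' base path below is
# hypothetical. This round trip only demonstrates the peak scaling on a
# single synthetic file.
import os
import numpy as np
from obspy import read, Trace

demo_dir = '/tmp/demo/predict/syn/Z/'  # hypothetical directory layout
if not os.path.isdir(demo_dir):
    os.makedirs(demo_dir)
tr = Trace(data=np.sin(np.linspace(0, 10 * np.pi, 1000)).astype('float32'))
tr.write(demo_dir + '0.sac', format='SAC')
st = read(demo_dir + '0.sac')
scaled = st[0].data / np.max(abs(st[0].data))
assert abs(scaled).max() == 1.0  # peak amplitude is exactly 1 after scaling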
def test_writeSACXYWithMinimumStats(self):
    """
    Write SACXY with minimal stats header, nothing inherited from a SAC
    file.
    """
    tr = Trace()
    tr.stats.delta = 0.01
    tr.data = np.arange(0, 3000)
    sac_file = NamedTemporaryFile().name
    tr.write(sac_file, 'SACXY')
    st = read(sac_file)
    os.remove(sac_file)
    self.assertEquals(st[0].stats.delta, 0.01)
    self.assertEquals(st[0].stats.sampling_rate, 100.0)
def test_writeSmallTrace(self):
    """
    Tests writing Traces containing 0 to 3 samples only.
    """
    for format in ['SAC', 'SACXY']:
        for num in range(0, 4):
            tr = Trace(data=np.arange(num))
            tempfile = NamedTemporaryFile().name
            tr.write(tempfile, format=format)
            # test results
            st = read(tempfile, format=format)
            self.assertEquals(len(st), 1)
            self.assertEquals(len(st[0]), num)
            os.remove(tempfile)
def test_writeSmallTrace(self):
    """
    Tests writing Traces containing 0 to 3 samples only.
    """
    for format in ['SLIST', 'TSPAIR']:
        for num in range(0, 4):
            tr = Trace(data=np.arange(num))
            tempfile = NamedTemporaryFile().name
            tr.write(tempfile, format=format)
            # test results
            st = read(tempfile, format=format)
            self.assertEquals(len(st), 1)
            self.assertEquals(len(st[0]), num)
            os.remove(tempfile)
def test_writeSmallTrace(self):
    """
    Tests writing Traces containing 0 to 3 samples only.
    """
    for format in ['SH_ASC', 'Q']:
        for num in range(0, 4):
            tr = Trace(data=np.arange(num))
            tempfile = NamedTemporaryFile().name
            if format == 'Q':
                tempfile += '.QHD'
            tr.write(tempfile, format=format)
            # test results
            st = read(tempfile, format=format)
            self.assertEquals(len(st), 1)
            self.assertEquals(len(st[0]), num)
            # Q files consist of two files - deleting additional file
            if format == 'Q':
                os.remove(tempfile[:-4] + '.QBN')
                os.remove(tempfile[:-4])
            os.remove(tempfile)
def test_issue376(self):
    """
    Tests writing Traces containing 1 or 2 samples only.
    """
    # one sample
    tr = Trace(data=np.ones(1))
    tempfile = NamedTemporaryFile().name
    tr.write(tempfile, format="MSEED")
    st = read(tempfile)
    self.assertEquals(len(st), 1)
    self.assertEquals(len(st[0]), 1)
    os.remove(tempfile)
    # two samples
    tr = Trace(data=np.ones(2))
    tempfile = NamedTemporaryFile().name
    tr.write(tempfile, format="MSEED")
    st = read(tempfile)
    self.assertEquals(len(st), 1)
    self.assertEquals(len(st[0]), 2)
    os.remove(tempfile)
def y2m(file, path):
    """
    yspec outputs to SAC format
    """
    stationID = int(file.split('.')[-1])
    chans = ['BHZ', 'BHN', 'BHE']
    dat = np.loadtxt(file)
    npts = len(dat[:, 0])
    for i, chan in enumerate(chans):
        # `t` (the start time, a UTCDateTime) is expected to be defined
        # in the enclosing scope
        stats = {'network': 'SG',
                 'station': 'RS%02d' % stationID,
                 'location': '',
                 'channel': chan,
                 'npts': npts,
                 'sampling_rate': (npts - 1.) / (dat[-1, 0] - dat[0, 0]),
                 'starttime': t,
                 'mseed': {'dataquality': 'D'}}
        traces = Trace(data=dat[:, 1 + i], header=stats)
        traces.write(os.path.join(path, 'SAC', 'dis.%s.%s.%s'
                     % (traces.stats.station, traces.stats.location,
                        traces.stats.channel)), format='SAC')
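# Hedged usage sketch for y2m() above (not from the original source): the
# input file name must end in a numeric station index, the file must hold
# whitespace-separated columns [time, Z, N, E] readable by np.loadtxt, and
# `t` must already exist in the module namespace. All names and values
# below are hypothetical.
import os
import numpy as np
from obspy.core import UTCDateTime

t = UTCDateTime(2009, 1, 1)  # start time read by y2m() as a global
time_axis = np.linspace(0.0, 10.0, 101)
columns = [time_axis] + [np.random.randn(101) for _ in range(3)]
np.savetxt('yspec.out.0007', np.column_stack(columns))
if not os.path.isdir(os.path.join('out', 'SAC')):
    os.makedirs(os.path.join('out', 'SAC'))  # y2m() writes into <path>/SAC/
y2m('yspec.out.0007', 'out')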
def test_issue156(self):
    """
    Test case for issue #156.
    """
    # 1
    tr = Trace()
    tr.stats.delta = 0.01
    tr.data = np.arange(0, 3000)
    sac_file = NamedTemporaryFile().name
    tr.write(sac_file, "SAC")
    st = read(sac_file)
    os.remove(sac_file)
    self.assertEquals(st[0].stats.delta, 0.01)
    self.assertEquals(st[0].stats.sampling_rate, 100.0)
    # 2
    tr = Trace()
    tr.stats.delta = 0.005
    tr.data = np.arange(0, 2000)
    sac_file = NamedTemporaryFile().name
    tr.write(sac_file, "SAC")
    st = read(sac_file)
    os.remove(sac_file)
    self.assertEquals(st[0].stats.delta, 0.005)
    self.assertEquals(st[0].stats.sampling_rate, 200.0)
print "STACK" corrc3 = stack(datac3, stack_method=p['stack_method']) try: os.makedirs('c3/%s/%s.%s/' % (staTarget1, comp1, comp2)) # os.makedirs('c1/%s/'%staTarget1) except: pass t = Trace() t.stats.station = 'c3' t.stats.sampling_rate = p['df'] t.data = np.array(corrc3[::-1]) t.stats.starttime -= (len(corrc3) / 2) / p['df'] t.write('c3/%s/%s.%s/BO.c3.%s.%s.%s.mseed'%(staTarget1,\ comp1, comp2, namepairA_B, comp1, comp2), format='MSEED') # t2 = Trace() # t2.stats.station = 'c1' # t2.stats.sampling_rate = df # t2.data= np.array(aa) # t2.stats.starttime -= ( len(aa) / 2 ) / df # t2.write('c1/%s/c1.%s.mseed'%(staTarget1, pair), format='MSEED') if __name__ == '__main__': #staTarget1 = '235713' #staTarget2 = '236977' #depth = 0 t = time.time() c3_from_bin(sys.argv[1:]) print 'It took :%s s' % (time.time() - t)
def savecorrs(correlation, phaseweight, n_stack, id1, id2, geoinf,
              corrname, corrtype, outdir, params=None, timestring='',
              startday=None, endday=None):
    #==========================================================================
    #- Write metadata info to sac header
    #- Store results
    #==========================================================================
    tr = Trace(data=correlation)
    tr.stats.sac = {}
    if startday is None:
        startday = UTCDateTime(inp.startdate)
    if endday is None:
        endday = UTCDateTime(inp.enddate)
    (lat1, lon1, lat2, lon2, dist, az, baz) = geoinf

    # Add a preprocessing string:
    prepstring = get_prepstring()

    tr.stats.sampling_rate = inp.Fs[-1]
    tr.stats.starttime = UTCDateTime(2000, 1, 1) - inp.max_lag * inp.Fs[-1]
    tr.stats.network = id1.split('.')[0]
    tr.stats.station = id1.split('.')[1]
    tr.stats.location = id1.split('.')[2]
    tr.stats.channel = id1.split('.')[3]

    tr.stats.sac['kt2'] = prepstring
    tr.stats.sac['kt8'] = corrtype
    tr.stats.sac['user0'] = n_stack
    tr.stats.sac['user1'] = inp.winlen
    tr.stats.sac['user2'] = inp.olap
    tr.stats.sac['b'] = -inp.max_lag
    tr.stats.sac['e'] = inp.max_lag
    tr.stats.sac['kt0'] = startday.strftime('%Y%j')
    tr.stats.sac['kt1'] = endday.strftime('%Y%j')
    tr.stats.sac['iftype'] = 1
    tr.stats.sac['stla'] = lat1
    tr.stats.sac['stlo'] = lon1
    tr.stats.sac['kevnm'] = id2.split('.')[1]
    tr.stats.sac['evla'] = lat2
    tr.stats.sac['evlo'] = lon2
    tr.stats.sac['dist'] = dist
    tr.stats.sac['az'] = az
    tr.stats.sac['baz'] = baz
    tr.stats.sac['kuser0'] = id2.split('.')[0]
    tr.stats.sac['kuser1'] = id2.split('.')[2]
    tr.stats.sac['kuser2'] = id2.split('.')[3]

    if params is not None:
        tr.stats.sac['user3'] = params[0]
        tr.stats.sac['user4'] = params[1]
        tr.stats.sac['user5'] = params[2]
        tr.stats.sac['user6'] = params[3]
        tr.stats.sac['user7'] = params[4]
        tr.stats.sac['user8'] = params[5]

    #- open file and write correlation function
    fileid = outdir + id1 + '.' + id2 + '.' + corrtype + '.' + \
        corrname + timestring + '.SAC'
    tr.write(fileid, format='SAC')

    if phaseweight is not None:
        fileid_cwt = outdir + id1 + '.' + id2 + '.' + corrtype + \
            '.' + corrname + timestring + '.npy'
        np.save(fileid_cwt, phaseweight)
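# Hedged note on savecorrs() above (assumptions, not from the original
# source): `inp` is a module-level configuration object that must provide
# startdate, enddate, Fs (a list whose last entry is the output sampling
# rate), max_lag, winlen and olap; get_prepstring() is assumed to be
# defined elsewhere in the same module. A minimal hypothetical stand-in
# for testing could look like:
#
# class Inp(object):
#     startdate = '2000-01-01'
#     enddate = '2000-12-31'
#     Fs = [10.0]
#     max_lag = 100
#     winlen = 3600
#     olap = 1800
# inp = Inp()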
def test_readThreadSafe(self):
    """
    Tests for race conditions: read the same waveform file n_threads
    (currently 30) times in parallel and compare the results, which
    must all be identical.
    """
    data = np.arange(0, 500)
    start = UTCDateTime(2009, 1, 13, 12, 1, 2, 999000)
    formats = _getEntryPoints('obspy.plugin.waveform', 'writeFormat')
    for format in formats:
        # XXX: skip SEGY and SU formats for now as they need some special
        # headers.
        if format in ['SEGY', 'SU', 'SEG2']:
            continue
        dt = np.dtype("int")
        if format in ('MSEED', 'GSE2'):
            dt = "int32"
        tr = Trace(data=data.astype(dt))
        tr.stats.network = "BW"
        tr.stats.station = "MANZ1"
        tr.stats.location = "00"
        tr.stats.channel = "EHE"
        tr.stats.calib = 0.999999
        tr.stats.delta = 0.005
        tr.stats.starttime = start
        # create waveform file with given format and byte order
        outfile = NamedTemporaryFile().name
        tr.write(outfile, format=format)
        if format == 'Q':
            outfile += '.QHD'
        n_threads = 30
        streams = []

        def testFunction(streams):
            st = read(outfile, format=format)
            streams.append(st)

        # Read the file n_threads times in parallel and collect the
        # resulting streams.
        for _i in xrange(n_threads):
            thread = threading.Thread(target=testFunction, args=(streams,))
            thread.start()
        # Loop until all threads are finished.
        start = time.time()
        while True:
            if threading.activeCount() == 1:
                break
            # Avoid infinite loop and leave after 120 seconds
            # such a long time is needed for debugging with valgrind
            elif time.time() - start >= 120:
                msg = 'Not all threads finished!'
                raise Warning(msg)
            else:
                continue
        # Compare all values which should be identical and clean up files
        #for data in :
        #    np.testing.assert_array_equal(values, original)
        os.remove(outfile)
        if format == 'Q':
            os.remove(outfile[:-4] + '.QBN')
            os.remove(outfile[:-4])
def ncExtract(address):
    """
    This function extracts a station (data, response file, header) from
    a netCDF file

    :type rootgrp: netCDF4.Dataset
    :param rootgrp: a netCDF version 4 group that contains one event
    :type tr: class 'obspy.core.trace.Trace'
    :param tr: the trace that will be extracted from the nc file
    :type resp_read: str
    :param resp_read: the whole response file of the trace in one string
        format extracted from the info/respfile attribute
    """
    global rootgrp

    if not os.path.isdir(os.path.join(address, 'Resp_NC')):
        os.mkdir(os.path.join(address, 'Resp_NC'))
    if not os.path.isdir(os.path.join(address, 'BH_NC')):
        os.mkdir(os.path.join(address, 'BH_NC'))

    root_grps = rootgrp.groups
    num_iter = 1

    print "\n----------------------------"
    print "Number of all available"
    print "stations in the netCDF file:"
    print len(root_grps)
    print "----------------------------\n"

    if not input['noaxisem'] == 'Y':
        axi_open = open(os.path.join(address, 'STATIONS'), 'w')
        axi_open.writelines(rootgrp.axisem[17:])
        axi_open.close()

    for grp in root_grps:
        print str(num_iter),
        stgrp = root_grps[grp]
        stdata = stgrp.variables['data'][:]
        resp_read = stgrp.respfile
        if not resp_read == 'NO RESPONSE FILE AVAILABLE':
            resp_open = open(os.path.join(address, 'Resp_NC',
                             'RESP.' + stgrp.identity), 'w')
            resp_open.writelines(resp_read)
            resp_open.close()
        else:
            print '\nNO RESPONSE FILE AVAILABLE for ' + stgrp.identity
        ststats = {}
        for key in range(0, len(eval(stgrp.headerK))):
            ststats[eval(stgrp.headerK)[key]] = eval(stgrp.headerV)[key]
        tr = Trace(stdata, ststats)
        tr.write(os.path.join(address, 'BH_NC', stgrp.identity),
                 format='SAC')
        num_iter += 1
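# Hedged usage sketch for ncExtract() above (not from the original
# source). ncExtract() relies on two module-level globals: `rootgrp`, an
# open netCDF4.Dataset holding one group per station, and `input`, a dict
# with a 'noaxisem' flag. The file name below is hypothetical.
#
# import netCDF4
# rootgrp = netCDF4.Dataset('event_2009001.nc', 'r')
# input = {'noaxisem': 'N'}
# ncExtract('/path/to/event_dir')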
def test_readAndWrite(self):
    """
    Tests read and write methods for all installed waveform plug-ins.
    """
    data = np.arange(0, 2000)
    start = UTCDateTime(2009, 1, 13, 12, 1, 2, 999000)
    formats = _getEntryPoints('obspy.plugin.waveform', 'writeFormat')
    for format in formats:
        # XXX: skip SEGY and SU formats for now as they need some special
        # headers.
        if format in ['SEGY', 'SU', 'SEG2']:
            continue
        for native_byteorder in ['<', '>']:
            for byteorder in ['<', '>', '=']:
                # new trace object in native byte order
                dt = np.dtype("int").newbyteorder(native_byteorder)
                if format in ('MSEED', 'GSE2'):
                    # MiniSEED and GSE2 cannot write int64, enforce type
                    dt = "int32"
                tr = Trace(data=data.astype(dt))
                tr.stats.network = "BW"
                tr.stats.station = "MANZ1"
                tr.stats.location = "00"
                tr.stats.channel = "EHE"
                tr.stats.calib = 0.199999
                tr.stats.delta = 0.005
                tr.stats.starttime = start
                # create waveform file with given format and byte order
                outfile = NamedTemporaryFile().name
                tr.write(outfile, format=format, byteorder=byteorder)
                if format == 'Q':
                    outfile += '.QHD'
                # read in again using auto detection
                st = read(outfile)
                self.assertEquals(len(st), 1)
                self.assertEquals(st[0].stats._format, format)
                # read in using format argument
                st = read(outfile, format=format)
                self.assertEquals(len(st), 1)
                self.assertEquals(st[0].stats._format, format)
                # read in using StringIO instances; skip Q files as they
                # need multiple files
                if format not in ['Q']:
                    # file handler without format
                    temp = open(outfile, 'rb')
                    st = read(temp)
                    self.assertEquals(len(st), 1)
                    self.assertEquals(st[0].stats._format, format)
                    # file handler with format
                    temp = open(outfile, 'rb')
                    st = read(temp, format=format)
                    self.assertEquals(len(st), 1)
                    self.assertEquals(st[0].stats._format, format)
                    # StringIO without format
                    temp = StringIO.StringIO(open(outfile, 'rb').read())
                    st = read(temp)
                    self.assertEquals(len(st), 1)
                    self.assertEquals(st[0].stats._format, format)
                    # StringIO with format
                    temp = StringIO.StringIO(open(outfile, 'rb').read())
                    st = read(temp, format=format)
                    self.assertEquals(len(st), 1)
                    self.assertEquals(st[0].stats._format, format)
                    # cStringIO without format
                    temp = cStringIO.StringIO(open(outfile, 'rb').read())
                    st = read(temp)
                    self.assertEquals(len(st), 1)
                    self.assertEquals(st[0].stats._format, format)
                    # cStringIO with format
                    temp = cStringIO.StringIO(open(outfile, 'rb').read())
                    st = read(temp, format=format)
                    self.assertEquals(len(st), 1)
                    self.assertEquals(st[0].stats._format, format)
                # check byte order
                self.assertEquals(st[0].data.dtype.byteorder, '=')
                # check meta data
                # some formats do not contain a calibration factor
                if format not in ['MSEED', 'WAV', 'TSPAIR', 'SLIST']:
                    self.assertAlmostEquals(st[0].stats.calib, 0.199999, 5)
                else:
                    self.assertEquals(st[0].stats.calib, 1.0)
                if format not in ['WAV']:
                    self.assertEquals(st[0].stats.starttime, start)
                    self.assertEquals(st[0].stats.endtime, start + 9.995)
                    self.assertEquals(st[0].stats.delta, 0.005)
                    self.assertEquals(st[0].stats.sampling_rate, 200.0)
                # network/station/location/channel codes
                if format in ['Q', 'SH_ASC', 'GSE2']:
                    # no network or location code in Q, SH_ASC, GSE2
                    self.assertEquals(st[0].id, ".MANZ1..EHE")
                elif format not in ['WAV']:
                    self.assertEquals(st[0].id, "BW.MANZ1.00.EHE")
                # remove temporary files
                os.remove(outfile)
                # Q files consist of two files - deleting additional file
                if format == 'Q':
                    os.remove(outfile[:-4] + '.QBN')
                    os.remove(outfile[:-4])
#        for ia, a in enumerate(gminor):
#            # if ia > 50: continue
#            binfileA = [a]
#            binfileB = [gmajor[ia]]
#            c3 = c3_from_bin_fun(s1, s2, d, binfileA, binfileB)
#            datac3 = np.vstack((datac3, c3))
#            print np.shape(datac3)
#        corrc3 = stack(datac3, stack_method='linear')
    t = Trace()
    t.stats.station = 'pc3'
    t.stats.channel = '%03d' % day
    t.stats.sampling_rate = df
    t.data = np.array(c3[::-1])
    t.stats.starttime -= (len(c3) / 2) / df
    try:
        os.makedirs('pc3/EE/%s/%s/' % (s1, s2))
    except:
        pass
    t.write('pc3/EE/%s/%s/pc3.%s.%s.%03d.EE.mseed' % (s1, s2, s1, s2, day),
            format='MSEED')


if __name__ == '__main__':
    #staTarget1 = '235713'
    #staTarget2 = '236977'
    #depth = 0.
    prestackc3(sys.argv[1:])
#EOF
def main():
    db = connect()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute SARA_RATIO ***')

    while is_next_job(db, jobtype='SARA_RATIO'):
        t0 = time.time()
        jobs = get_next_job(db, jobtype='SARA_RATIO')
        stations = []
        pairs = []
        refs = []
        for job in jobs:
            refs.append(job.ref)
            pairs.append(job.pair)
            netsta1, netsta2 = job.pair.split(':')
            stations.append(netsta1)
            stations.append(netsta2)
            goal_day = job.day

        stations = np.unique(stations)

        logging.info("New SARA Job: %s (%i pairs with %i stations)" %
                     (goal_day, len(pairs), len(stations)))
        logging.debug(
            "Preloading all envelopes and applying site and sensitivity")
        all = {}
        for station in stations:
            tmp = get_sara_param(db, station)
            sensitivity = tmp.sensitivity
            site_effect = tmp.site_effect
            try:
                tmp = read(os.path.join("SARA", "ENV", station,
                                        "%s.MSEED" % goal_day))
            except:
                logging.debug("Error reading %s:%s" % (station, goal_day))
                continue
            for trace in tmp:
                trace.data /= (sensitivity * site_effect)
            all[station] = tmp

        logging.debug("Computing all pairs")
        for job in jobs:
            netsta1, netsta2 = job.pair.split(':')
            net1, sta1 = netsta1.split(".")
            net2, sta2 = netsta2.split(".")
            trace = Trace()
            if netsta1 not in all or netsta2 not in all:
                update_job(db, job.day, job.pair, 'SARA_RATIO', 'D',
                           ref=job.ref)
                continue
            tmp = Stream()
            for tr in all[netsta1]:
                tmp += tr
            for tr in all[netsta2]:
                tmp += tr
            # tmp = Stream(traces=[all[netsta1], all[netsta2]])
            # print(tmp)
            tmp.merge()
            tmp = make_same_length(tmp)
            tmp.merge(fill_value=np.nan)
            if len(tmp) > 1:
                trace.data = tmp.select(network=net1, station=sta1)[0].data / \
                    tmp.select(network=net2, station=sta2)[0].data
                trace.stats.starttime = tmp[0].stats.starttime
                trace.stats.delta = tmp[0].stats.delta

                env_output_dir = os.path.join('SARA', 'RATIO',
                                              job.pair.replace(":", "_"))
                if not os.path.isdir(env_output_dir):
                    os.makedirs(env_output_dir)
                trace.write(os.path.join(env_output_dir,
                                         goal_day + '.MSEED'),
                            format="MSEED", encoding="FLOAT32")
            update_job(db, job.day, job.pair, 'SARA_RATIO', 'D',
                       ref=job.ref)
            del tmp
        logging.info("Done. It took %.2f seconds" % (time.time() - t0))
    else:
        for ia, a in enumerate(gminor):
            # if ia > 50: continue
            binfileA = [a]
            binfileB = [gmajor[ia]]
            c3 = c3_from_bin_fun(s1, s2, d, binfileA, binfileB)
            datac3 = np.vstack((datac3, c3))
            print np.shape(datac3)
    corrc3 = stack(datac3, stack_method='linear')
    t = Trace()
    t.stats.station = 'pc3'
    t.stats.sampling_rate = df
    t.data = np.array(corrc3[::-1])
    t.stats.starttime -= (len(corrc3) / 2) / df
    if sorted:
        t.write('pc3/prestackc3.%s.%s.mseed' % (s1, s2), format='MSEED')
    else:
        t.write('pc3/random_prestackc3.%s.%s.mseed' % (s1, s2),
                format='MSEED')


if __name__ == '__main__':
    #staTarget1 = '235713'
    #staTarget2 = '236977'
    #depth = 0.
    prestackc3(sys.argv[1:])
#EOF