def process(self, stream):
    """Run algorithm for a stream.

    Processes all traces in the stream.

    Parameters
    ----------
    stream : obspy.core.Stream
        stream of data to process

    Returns
    -------
    out : obspy.core.Stream
        stream containing 1 trace per original trace.
    """
    out = Stream()
    for trace in stream:
        data = trace.data
        step = self.decimation
        filtered = self.firfilter(data, self.window, step)

        stats = Stats(trace.stats)
        # stats.channel = trace_chan_dict2[stats.channel]
        # shift starttime to the center of the filter window
        stats.starttime = trace.stats.starttime + \
            (self.numtaps // 2) * self.sample_period
        stats.delta = stats.delta * step
        # stats.processing.append('[Gaussian Filter]')
        stats.npts = filtered.shape[0]

        trace_out = self.create_trace(stats.channel, stats, filtered)
        out += trace_out
    return out
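The firfilter helper that this method (and the process_step variants below) calls is not included in this collection. A minimal sketch of what such a filter-and-decimate step could look like in plain NumPy; the name, signature, and 'valid'-mode choice are assumptions, not the original implementation:

import numpy as np

def firfilter(data, window, step):
    """Convolve data with the FIR window, then keep every step-th sample.

    A sketch only: 'valid' mode drops the edges where the window does not
    fully overlap the data, which is why the callers shift starttime by
    roughly numtaps // 2 input samples.
    """
    filtered = np.convolve(data, window, mode='valid')
    return filtered[::step]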
def ascii(path, filenames):
    """ Reads SPECFEM3D-style ASCII data
    """
    from numpy import loadtxt
    from obspy.core import Stream, Stats, Trace

    stream = Stream()
    for filename in filenames:
        stats = Stats()
        data = loadtxt(path + '/' + filename)

        stats.filename = filename
        stats.starttime = data[0, 0]
        # column 0 is time, column 1 is amplitude
        stats.delta = data[1, 0] - data[0, 0]
        stats.npts = len(data[:, 0])

        try:
            parts = filename.split('.')
            stats.network = parts[0]
            stats.station = parts[1]
            stats.channel = parts[2]
        except IndexError:
            pass

        stream.append(Trace(data=data[:, 1], header=stats))

    return stream
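SPECFEM-style ASCII seismograms are plain two-column text (time, amplitude). A quick way to exercise the reader above, writing a made-up file that follows the NET.STA.CHA.ext naming the try-block expects:

import numpy as np

# Write a tiny two-column (time, amplitude) file, then read it back.
# The station name and values are made up for illustration.
t = np.linspace(0.0, 1.0, 101)
np.savetxt('/tmp/XX.STA01.BXZ.semd',
           np.column_stack([t, np.sin(2 * np.pi * 5 * t)]))

st = ascii('/tmp', ['XX.STA01.BXZ.semd'])
print(st[0].stats.network, st[0].stats.station, st[0].stats.channel)  # XX STA01 BXZ
print(st[0].stats.delta)  # 0.01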
def read_ascii(path, NR, nt):
    import os
    import numpy as np
    from obspy.core import Stream, Stats, Trace

    dat_type = 'semd'
    comp1 = 'FXX'
    comp2 = 'FXY'
    stream = Stream()
    for rec_x in range(0, NR):
        file_name_in1 = path + 'P.R' + str(int(rec_x + 1)) + '.' + comp1 + '.' + dat_type
        file_name_in2 = path + 'P.R' + str(int(rec_x + 1)) + '.' + comp2 + '.' + dat_type
        xz1 = np.genfromtxt(file_name_in1)
        xz2 = np.genfromtxt(file_name_in2)

        deg = 0.0
        # angle of projection
        alpha = np.arctan(xz2[:nt, 1] / (1.0e-40 + xz1[:nt, 1]))
        direction = np.sign(np.cos(deg * np.pi / 180.0) * xz1[:nt, 1] * np.cos(alpha) +
                            np.sin(deg * np.pi / 180.0) * xz2[:nt, 1] * np.cos(alpha))
        # scalar radial component
        data = direction * np.sqrt(xz1[:nt, 1] ** 2 + xz2[:nt, 1] ** 2) * np.cos(alpha)

        stats = Stats()
        stats.filename = path + 'P.R' + str(int(rec_x + 1))
        stats.starttime = xz1[0, 0]
        stats.delta = xz1[1, 0] - xz1[0, 0]
        stats.npts = len(xz1[:nt, 0])

        try:
            parts = os.path.basename(file_name_in1).split('.')
            stats.network = parts[0]
            stats.station = parts[1]
            stats.channel = parts[2]
        except IndexError:
            pass

        stream.append(Trace(data=data[:], header=stats))

    return stream
def __create_trace(
    data,
    network="NT",
    station="BOU",
    channel="H",
    location="R0",
    data_interval="second",
    data_type="interval",
):
    """
    Utility to create a trace containing the given numpy array.

    Parameters
    ----------
    data: array
        The array to be inserted into the trace.

    Returns
    -------
    obspy.core.Trace
        Trace containing the data, with stats built from the remaining
        keyword arguments.
    """
    stats = Stats()
    stats.starttime = UTCDateTime("2019-12-01")
    stats.delta = TimeseriesUtility.get_delta_from_interval(data_interval)
    stats.network = network
    stats.station = station
    stats.channel = channel
    stats.location = location
    stats.npts = len(data)
    stats.data_interval = data_interval
    stats.data_type = data_type
    numpy_data = numpy.array(data, dtype=numpy.float64)
    return Trace(numpy_data, stats)
def process_step(self, step, stream):
    """Filters stream for one step.

    Filters all traces in stream.

    Parameters
    ----------
    step : array element
        step holding variables for one filtering operation
    stream : obspy.core.Stream
        stream of data to filter

    Returns
    -------
    out : obspy.core.Stream
        stream containing 1 trace per original trace.
    """
    # gather variables from step
    input_sample_period = step["input_sample_period"]
    output_sample_period = step["output_sample_period"]
    window = np.array(step["window"])
    decimation = int(output_sample_period / input_sample_period)
    numtaps = len(window)
    window = window / sum(window)
    out = Stream()
    for trace in stream:
        filtered = self.firfilter(trace.data, window, decimation)
        stats = Stats(trace.stats)
        stats.starttime = stats.starttime + input_sample_period * (numtaps // 2)
        stats.delta = output_sample_period
        stats.npts = len(filtered)
        trace_out = self.create_trace(stats.channel, stats, filtered)
        out += trace_out
    return out
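Dividing the window by its sum gives the FIR filter unit DC gain (a constant input stays constant), and the starttime shift centers the window on the first output sample. A quick check of that arithmetic with made-up step values:

import numpy as np

# Made-up numbers: decimating 1-second data to 1-minute data, 91 taps.
input_sample_period = 1.0    # seconds
output_sample_period = 60.0  # seconds
window = np.hanning(91)

decimation = int(output_sample_period / input_sample_period)  # 60
window = window / window.sum()  # unit DC gain
print(decimation, window.sum())  # 60 1.0 (up to float rounding)

# First output sample sits at the center of the filter window, hence the
# starttime shift of input_sample_period * (numtaps // 2) = 45 seconds.
print(input_sample_period * (len(window) // 2))  # 45.0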
def ascii(path, filename):
    """ Reads SPECFEM3D-style ASCII data

    :type path: str
    :param path: path to datasets
    :type filename: str
    :param filename: file to read
    """
    st = Stream()
    stats = Stats()

    time, data = loadtxt(os.path.join(path, filename)).T

    stats.filename = filename
    stats.starttime = time[0]
    stats.delta = time[1] - time[0]
    stats.npts = len(data)

    try:
        parts = filename.split(".")
        stats.network = parts[0]
        stats.station = parts[1]
        stats.channel = parts[2]
    except IndexError:
        pass

    st.append(Trace(data=data, header=stats))

    return st
def _create_trace(data, channel, starttime, delta=60.):
    stats = Stats()
    stats.channel = channel
    stats.delta = delta
    stats.starttime = starttime
    stats.npts = len(data)
    data = numpy.array(data, dtype=numpy.float64)
    return Trace(data, stats)
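A minimal usage sketch for this helper (values are illustrative); ObsPy derives endtime from starttime, delta, and npts:

import numpy
from obspy.core import Stats, Trace, UTCDateTime

# Three one-minute samples starting at midnight.
tr = _create_trace([1.0, 2.0, 3.0], 'H', UTCDateTime('2020-01-01'))
print(tr.stats.endtime)  # starttime + (npts - 1) * delta = 2020-01-01T00:02:00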
def process_step(self, step, stream):
    """Filters stream for one step.

    Filters all traces in stream.

    Parameters
    ----------
    step : array element
        step holding variables for one filtering operation
    stream : obspy.core.Stream
        stream of data to filter

    Returns
    -------
    out : obspy.core.Stream
        stream containing 1 trace per original trace.
    """
    # gather variables from step
    input_sample_period = step["input_sample_period"]
    output_sample_period = step["output_sample_period"]
    window = np.array(step["window"])
    decimation = int(output_sample_period / input_sample_period)
    numtaps = len(window)
    window = window / sum(window)
    # first output timestamp is in the center of the filter window
    filter_time_shift = input_sample_period * (numtaps // 2)
    out = Stream()
    for trace in stream:
        # data to filter
        data = trace.data
        starttime = trace.stats.starttime + filter_time_shift
        # align with the output period
        misalignment = starttime.timestamp % output_sample_period
        if misalignment != 0:
            # skip incomplete input
            starttime = (starttime - misalignment) + output_sample_period
            input_starttime = starttime - filter_time_shift
            offset = int(1e-6 + (input_starttime - trace.stats.starttime) /
                         input_sample_period)
            print(f"Skipping {offset} input samples to align output",
                  file=sys.stderr)
            data = data[offset:]
        # check there is still enough data for filter
        if len(data) < numtaps:
            continue
        filtered = self.firfilter(data, window, decimation)
        stats = Stats(trace.stats)
        stats.starttime = starttime
        stats.delta = output_sample_period
        stats.npts = len(filtered)
        trace_out = self.create_trace(stats.channel, stats, filtered)
        out += trace_out
    return out
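The alignment block snaps the first output sample onto the output-period grid and discards the partial input before it. A worked example of the same arithmetic with made-up times:

from obspy.core import UTCDateTime

# 1-second input, 60-second output, 91 taps (made-up values).
input_sample_period = 1.0
output_sample_period = 60.0
numtaps = 91

filter_time_shift = input_sample_period * (numtaps // 2)       # 45 s
trace_starttime = UTCDateTime("2020-01-01T00:00:30")
starttime = trace_starttime + filter_time_shift                # 00:01:15
misalignment = starttime.timestamp % output_sample_period      # 15 s
starttime = (starttime - misalignment) + output_sample_period  # 00:02:00
offset = int(1e-6 + (starttime - filter_time_shift - trace_starttime) /
             input_sample_period)
print(starttime, offset)  # first aligned output time, 45 input samples skipped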
def get_obspy_trace(self):
    """
    Return class contents as obspy.Trace object
    """
    stat = Stats()
    stat.network = self.net.split(b'\x00')[0].decode()
    stat.station = self.sta.split(b'\x00')[0].decode()
    location = self.loc.split(b'\x00')[0].decode()
    if location == '--':
        stat.location = ''
    else:
        stat.location = location
    stat.channel = self.chan.split(b'\x00')[0].decode()
    stat.starttime = UTCDateTime(self.start)
    stat.sampling_rate = self.rate
    stat.npts = len(self.data)

    return Trace(data=self.data, header=stat)
def read_specfem_seismogram(output_files, network, station, band):
    st = Stream()
    for component in 'ZNE':
        channel = '%sX%s' % (band, component)
        filename = os.path.join(
            output_files,
            '%s.%s.%s.sem.ascii' % (network, station, channel))
        tmp = np.genfromtxt(filename)

        stats = Stats()
        stats.network = network
        stats.station = station
        stats.channel = channel
        stats.delta = tmp[1, 0] - tmp[0, 0]
        stats.npts = tmp.shape[0]
        stats.starttime = tmp[0, 0]

        tr = Trace(tmp[:, 1], stats)
        st += tr
    return st
def load_axisem3d_data(file, station_code, component):
    axisem3d_data = Dataset(file)
    times_a = axisem3d_data.variables['time_points']

    axisem3d_stats = Stats()
    axisem3d_stats.delta = times_a[1] - times_a[0]
    axisem3d_stats.starttime = UTCDateTime(times_a[0])
    axisem3d_stats.npts = times_a.size

    station_code = station_code + '.KO.RTZ'
    if component == 'z':
        c_a = axisem3d_data.variables[station_code][:, 2]
    elif component == 't':
        c_a = axisem3d_data.variables[station_code][:, 1]
    elif component == 'r':
        c_a = axisem3d_data.variables[station_code][:, 0]
    else:
        raise Exception('No component with this name')

    axisem3d_trace = Trace(c_a, header=axisem3d_stats)
    axisem3d_data.close()
    return axisem3d_trace
def process_step(self, step, stream):
    """Filters stream for one step.

    Filters all traces in stream.

    Parameters
    ----------
    step : array element
        step holding variables for one filtering operation
    stream : obspy.core.Stream
        stream of data to filter

    Returns
    -------
    out : obspy.core.Stream
        stream containing 1 trace per original trace.
    """
    # gather variables from step
    input_sample_period = step["input_sample_period"]
    output_sample_period = step["output_sample_period"]
    window = np.array(step["window"])
    decimation = int(output_sample_period / input_sample_period)
    numtaps = len(window)
    window = window / sum(window)
    out = Stream()
    for trace in stream:
        starttime, data = self.align_trace(step, trace)
        # check that there is still enough data to filter
        if len(data) < numtaps:
            continue
        filtered = self.firfilter(data, window, decimation)
        stats = Stats(trace.stats)
        stats.delta = output_sample_period
        stats.data_interval = step["data_interval"]
        stats.data_interval_type = step["data_interval_type"]
        stats.filter_comments = step["filter_comments"]
        stats.starttime = starttime
        stats.npts = len(filtered)
        trace_out = self.create_trace(stats.channel, stats, filtered)
        out += trace_out
    return out
def moveout_test(PSS_file, q, phase):
    """
    Creates synthetic PRFs and stacks them after depth migration.

    Parameters
    ----------
    PSS_file : str
        Filename of raysum file containing P-Sv-Sh traces.
    q : float
        Slowness [s/m].
    phase : str
        Either "P" for Ps or "S" for Sp.

    Returns
    -------
    z : np.array
        Depth vector.
    stack : np.array
        Receiver function stack.
    RF_mo : np.array
        Matrix containing all depth migrated RFs.
    RF : np.array
        Matrix containing all RFs.
    dt : float
        Sampling interval.
    PSS : np.array
        Matrix containing all traces in P-Sv-Sh.
    """
    rayp = q * 1.111949e5

    PSS, dt, M, N, shift = read_raysum(phase, PSS_file=PSS_file)

    # Create receiver functions
    RF = []
    RF_mo = []

    stats = Stats()
    stats.npts = N
    stats.delta = dt
    stats.starttime = UTCDateTime(0)

    for i in range(M):
        if phase == "P":
            data, _, IR = it(PSS[i, 0, :], PSS[i, 1, :], dt, shift=shift,
                             width=4)
        elif phase == "S":
            data, _, _ = it(PSS[i, 1, :], PSS[i, 0, :], dt, shift=shift,
                            width=4)
        RF.append(data)
        z, rfc = moveout(data, stats, UTCDateTime(shift), rayp[i], phase,
                         fname="raysum.dat")
        RF_mo.append(rfc)

    stack = np.average(RF_mo, axis=0)

    plt.close('all')
    plt.figure()
    for mo in RF_mo:
        plt.plot(z, mo)

    return z, stack, RF_mo, RF, dt, PSS
def _read_tspair(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII TSPAIR file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the headers. This is most
        useful for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/tspair.ascii')
    """
    with open(filename, 'rt') as fh:
        # read file and split text into channels
        buf = []
        key = False
        for line in fh:
            if line.isspace():
                # blank line
                continue
            elif line.startswith('TIMESERIES'):
                # new header line
                key = True
                buf.append((line, io.StringIO()))
            elif headonly:
                # skip data for option headonly
                continue
            elif key:
                # data entry - may be written in multiple columns
                buf[-1][1].write(line.strip().split()[-1] + ' ')
    # create ObsPy stream object
    stream = Stream()
    for header, data in buf:
        # create Stats
        stats = Stats()
        parts = header.replace(',', '').split()
        temp = parts[1].split('_')
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        # don't put blank quality code into 'mseed' dictionary
        # (quality code is mentioned as optional by format specs anyway)
        if temp[4]:
            stats.mseed = AttribDict({'dataquality': temp[4]})
        stats.ascii = AttribDict({'unit': parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
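The TSPAIR header line carries the trace id and metadata at fixed positions, which is what the parts/temp indexing above relies on. An illustration with a format-conforming (made-up) header line:

# How the parts/temp indexing maps onto a TSPAIR header line:
header = ('TIMESERIES NZ_DCZ_20_HNZ_D, 5 samples, 200 sps, '
          '2012-03-14T20:00:00.000000, TSPAIR, INTEGER, COUNTS')
parts = header.replace(',', '').split()
temp = parts[1].split('_')

print(temp[0], temp[1], temp[2], temp[3])  # NZ DCZ 20 HNZ (net sta loc cha)
print(temp[4])    # D                          (data quality, optional)
print(parts[2])   # 5                          (npts)
print(parts[4])   # 200                        (sampling_rate)
print(parts[6])   # 2012-03-14T20:00:00.000000 (starttime)
print(parts[8])   # INTEGER                    (data type)
print(parts[-1])  # COUNTS                     (unit)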
def readTSPAIR(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII TSPAIR file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the headers. This is most
        useful for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/tspair.ascii')
    """
    fh = open(filename, "rt")
    # read file and split text into channels
    headers = {}
    key = None
    for line in fh:
        if line.isspace():
            # blank line
            continue
        elif line.startswith("TIMESERIES"):
            # new header line
            key = line
            headers[key] = StringIO()
        elif headonly:
            # skip data for option headonly
            continue
        elif key:
            # data entry - may be written in multiple columns
            headers[key].write(line.strip().split()[-1] + " ")
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    for header, data in headers.items():
        # create Stats
        stats = Stats()
        parts = header.replace(",", "").split()
        temp = parts[1].split("_")
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({"dataquality": temp[4]})
        stats.ascii = AttribDict({"unit": parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
def raw_import(gzip_filename):
    """
    Makes a 'raw' stream file from the gzipped csv file.

    The csv file has been downloaded from the JAXA website. The method makes
    a raw stream which does not yet have the frames reconstructed.

    :type gzip_filename: str
    :param gzip_filename: gzipped filename of the CSV file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.
    """
    # read the gzipped csv file
    with gzip.open(gzip_filename, 'rt') as fh:
        # read file
        buf = []
        # read the header
        # it should contain either 1 channel or 3
        header = next(fh).split(',')
        if len(header) == 8:
            # the RESP files use either 'MH1', 'MH2', 'MHZ'
            # the JAXA files use 'LPX', 'LPY', 'LPZ'
            # X should point north, Y east, but this is not always the case
            # so we rename LPX to MH1, and LPY to MH2
            channels = ['MH1', 'MH2', 'MHZ']
            raw_channels = ['_M1', '_M2', '_MZ']
            for line in fh:
                temp = line.split(',')
                try:
                    temp[4] = UTCDateTime(temp[4])
                except ValueError:
                    # this is a specific error which is found in the csv file
                    if temp[4] == '1975-49-11 19:13:04.232000':
                        temp[4] = UTCDateTime('1975-09-11 19:13:04.232000')
                    else:
                        raise
                try:
                    temp[0] = int(temp[0])
                except ValueError:
                    # this is a specific error which is found in the csv file
                    if temp[4] == UTCDateTime(
                            '1975-09-15 12:53:36.849000') and temp[0] == '<3':
                        temp[0] = 83
                    else:
                        raise
                buf.append(
                    (temp[1], temp[2], temp[4], int(temp[0]), int(temp[3]),
                     int(temp[5]), int(temp[6]), int(temp[7])))
        elif len(header) == 6:
            channels = ['SPZ']
            raw_channels = ['_SZ']
            for line in fh:
                # check the manual list of points which have been removed
                if line in remove_manually:
                    continue
                temp = line.split(',')
                # the original order:
                # frame_count, ap_station, ground_station, nc, time, spz
                # make a tuple (in a new order so that it can be sorted):
                # ap_station, ground_station, time, frame_count, nc, spz
                buf.append((temp[1], temp[2], UTCDateTime(temp[4]),
                            int(temp[0]), int(temp[3]), int(temp[5])))

    # sort by ap_station, ground_station and time (and also everything else,
    # but that won't matter)
    buf.sort()

    stream = Stream()
    data_x = []
    data_y = []
    data_z = []
    data_sz = []
    abs_times = []
    frame_count_ncs = []
    corr_frame_count_ncs = []

    stats = Stats()
    stats.delta = DELTA
    network = 'XA'
    last_id = None

    for data in buf:
        # read in the data from the buffer
        station = data[0].rjust(3, 'S')
        ground_station = data[1].rjust(2, '0')
        time = data[2]
        frame_count = data[3]
        nc = data[4]

        # create a combination of frame count and nc - from 0.0 to 89.75
        frame_count_nc = float(frame_count) + (float(nc) - 1.) * 0.25

        id = "{0:s}.{1:s}.{2:s}.{3:s}".format(network, station,
                                              ground_station, channels[0])

        # check whether we are adding to an existing one, or creating a new one
        if last_id is None or last_id != id:
            # before creating the new one, add previous trace(s) to the stream
            if len(abs_times) > 0:
                _make_traces(stream=stream, stats=stats, header=header,
                             channels=raw_channels, data_x=data_x,
                             data_y=data_y, data_z=data_z, data_sz=data_sz,
                             abs_times=abs_times,
                             frame_count_ncs=frame_count_ncs)
            data_x = []
            data_y = []
            data_z = []
            data_sz = []
            abs_times = []
            frame_count_ncs = []

            stats = Stats()
            stats.delta = DELTA
            stats.starttime = time
            stats.network = network
            stats.station = station
            stats.location = ground_station

        # add the data from any line
        if len(header) == 8:
            data_x.append(data[5])
            data_y.append(data[6])
            data_z.append(data[7])
        else:
            data_sz.append(data[5])
        abs_times.append(time.timestamp)
        frame_count_ncs.append(frame_count_nc)

        last_id = id

    # add the last one
    if len(abs_times) > 0:
        _make_traces(stream=stream, stats=stats, header=header,
                     channels=raw_channels, data_x=data_x, data_y=data_y,
                     data_z=data_z, data_sz=data_sz, abs_times=abs_times,
                     frame_count_ncs=frame_count_ncs)

    return stream
def save_wave(self):
    # Fetch a wave from Ring 0
    wave = self.ring2buff.get_wave(0)

    # if wave is empty return
    if wave == {}:
        return

    # Lets try to buffer with python dictionaries and obspy
    name = wave["station"] + '.' + wave["channel"] + '.' + wave[
        "network"] + '.' + wave["location"]

    if name in self.wave_buffer:
        # Determine max samples for buffer
        max_samp = wave["samprate"] * 60 * self.minutes

        # Create a header:
        wavestats = Stats()
        wavestats.station = wave["station"]
        wavestats.network = wave["network"]
        wavestats.channel = wave["channel"]
        wavestats.location = wave["location"]
        wavestats.sampling_rate = wave["samprate"]
        wavestats.starttime = UTCDateTime(wave['startt'])

        # Create a trace
        wavetrace = Trace(header=wavestats)
        wavetrace.data = wave["data"]

        # Try to append data to buffer, if gap shutdown.
        try:
            self.wave_buffer[name].append(wavetrace, gap_overlap_check=True)
        except TypeError as err:
            logger.warning(err)
            self.runs = False
        except Exception:
            self.runs = False
            raise

        # Debug data
        if self.debug:
            logger.info("Station Channel combo is in buffer:")
            logger.info(name)
            logger.info("Size:")
            logger.info(self.wave_buffer[name].count())
            logger.debug("Data:")
            logger.debug(self.wave_buffer[name])
    else:
        # First instance of data in buffer, create a header:
        wavestats = Stats()
        wavestats.station = wave["station"]
        wavestats.network = wave["network"]
        wavestats.channel = wave["channel"]
        wavestats.location = wave["location"]
        wavestats.sampling_rate = wave["samprate"]
        wavestats.starttime = UTCDateTime(wave['startt'])

        # Create a trace
        wavetrace = Trace(header=wavestats)
        wavetrace.data = wave["data"]

        # Create a RTTrace
        rttrace = RtTrace(int(self.minutes * 60))
        self.wave_buffer[name] = rttrace

        # Append data
        self.wave_buffer[name].append(wavetrace, gap_overlap_check=True)

        # Debug data
        if self.debug:
            logger.info("First instance of station/channel:")
            logger.info(name)
            logger.info("Size:")
            logger.info(self.wave_buffer[name].count())
            logger.debug("Data:")
            logger.debug(self.wave_buffer[name])
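RtTrace is ObsPy's fixed-length real-time buffer: appending trims it to max_length seconds, discarding the oldest samples. A minimal standalone sketch of the append pattern used above (values are illustrative):

import numpy as np
from obspy import Trace, UTCDateTime
from obspy.realtime import RtTrace

# Keep at most one minute of data in the ring buffer.
rt = RtTrace(max_length=60)

tr = Trace(data=np.arange(100, dtype=np.int32))
tr.stats.sampling_rate = 100.0
tr.stats.starttime = UTCDateTime(2020, 1, 1)

rt.append(tr, gap_overlap_check=True)
print(rt.count())  # number of samples currently buffered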
def readSLIST(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII SLIST file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the headers. This is most
        useful for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read('/path/to/slist.ascii')
    """
    fh = open(filename, 'rt')
    # read file and split text into channels
    headers = {}
    key = None
    for line in fh:
        if line.isspace():
            # blank line
            continue
        elif line.startswith('TIMESERIES'):
            # new header line
            key = line
            headers[key] = StringIO()
        elif headonly:
            # skip data for option headonly
            continue
        elif key:
            # data entry - may be written in multiple columns
            headers[key].write(line.strip() + ' ')
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    for header, data in headers.items():
        # create Stats
        stats = Stats()
        parts = header.replace(',', '').split()
        temp = parts[1].split('_')
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({'dataquality': temp[4]})
        stats.ascii = AttribDict({'unit': parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            # parse data
            data.seek(0)
            if parts[8] == 'INTEGER':
                data = loadtxt(data, dtype='int', ndmin=1)
            elif parts[8] == 'FLOAT':
                data = loadtxt(data, dtype='float32', ndmin=1)
            else:
                raise NotImplementedError
            stream.append(Trace(data=data, header=stats))
    return stream
def rf_test(phase, dip, rfloc='output/waveforms/RF', geom_file='3D.geom',
            decon_meth='it'):
    """
    Creates synthetic PRFs from Raysum data.

    Parameters
    ----------
    phase : string
        "P" or "S".
    dip : int
        Dip of the LAB in deg; determines which files to use.
    rfloc : str, optional
        The parent directory, in which the RFs are saved.
    geom_file : str, optional
        Filename of the geometry file.

    Returns
    -------
    rfs : list
        List of RFTrace objects. Will in addition be saved in SAC format.
    statlat : list
        Station latitudes.
    statlon : list
        Station longitudes.
    """
    # Determine filenames
    PSS_file = []
    for i in range(16):
        PSS_file.append('3D_' + str(dip) + '_' + str(i) + '.tr')

    # Read geometry
    baz, q, dN, dE = read_geom(geom_file, phase)

    # statlat = dN/(DEG2KM*1000)
    d = np.sqrt(np.square(dN) + np.square(dE))
    az = np.rad2deg(np.arccos(dN / d))
    i = np.where(dE < 0)
    az[i] = az[i] + 180
    statlat = []
    statlon = []
    for azimuth, delta in zip(az, d):
        if delta == 0:
            statlat.append(0)
            statlon.append(0)
            continue
        coords = Geodesic.WGS84.Direct(0, 0, azimuth, delta)
        statlat.append(coords["lat2"])
        statlon.append(coords["lon2"])
    # for n, longitude in enumerate(lon):
    #     y, _, _ = gps2dist_azimuth(latitude, 0, latitude, longitude)
    # statlon = dE/(DEG2KM*1000)

    rayp = q * DEG2KM * 1000

    # Read traces
    stream = []
    for f in PSS_file:
        PSS, dt, _, N, shift = read_raysum(phase, PSS_file=f)
        stream.append(PSS)

    streams = np.vstack(stream)
    del stream

    M = len(baz)
    if M != streams.shape[0]:
        raise ValueError(
            'Number of traces (%d) does not equal the number of '
            'backazimuths in the geom file (%d).' % (streams.shape[0], M))

    rfs = []
    odir = os.path.join(rfloc, phase, 'raysum', str(dip))
    ch = ['BHP', 'BHV', 'BHH']  # Channel names

    os.makedirs(odir, exist_ok=True)

    # Create RF objects
    for i, st in enumerate(streams):
        s = Stream()
        for j, tr in enumerate(st):
            stats = Stats()
            stats.npts = N
            stats.delta = dt
            stats.starttime = UTCDateTime(0)
            stats.channel = ch[j]
            stats.network = 'RS'
            stats.station = str(dip)
            s.append(Trace(data=tr, header=stats))

        # Create info dictionary for rf creation
        info = {
            'onset': [UTCDateTime(0) + shift],
            'starttime': [UTCDateTime(0)],
            'statlat': statlat[i],
            'statlon': statlon[i],
            'statel': 0,
            'rayp_s_deg': [rayp[i]],
            'rbaz': [baz[i]],
            'rdelta': [np.nan],
            'ot_ret': [0],
            'magnitude': [np.nan],
            'evt_depth': [np.nan],
            'evtlon': [np.nan],
            'evtlat': [np.nan]
        }

        rf = createRF(s, phase=phase, method=decon_meth, info=info)

        # Write RF
        rf.write(os.path.join(odir, str(i) + '.sac'), format='SAC')
        rfs.append(rf)
    return rfs, statlat, statlon
    i += 1

# Time_stamps reference to beginning of a week
# Set the correct year, month, and day for Time_stamps
sttime = UTCDateTime(Time_stamps[0])
endtime = UTCDateTime(Time_stamps[len(Time_stamps) - 1])
sttime._set_year(2017)
endtime._set_year(2017)
sttime._set_month(8)
endtime._set_month(8)
sttime._set_day(13 + UTCDateTime(Time_stamps[0]).day)
endtime._set_day(13 + UTCDateTime(Time_stamps[len(Time_stamps) - 1]).day)

# Define stats
stats = Stats()
stats.starttime = sttime
stats.station = station
stats.network = 'NT'
stats.location = 'R0'
stats.data_interval = '256Hz'
stats.delta = .00390625
stats.data_type = 'variation'

# Create list of arrays and channel names and initialize counter k
arrays = [Hx, Hy, Ex, Ey]
k = 0

# Loop over channels to create an obspy stream of the data
for ar in arrays:
    stats.npts = len(ar)
    stats.channel = channels[k]
def readSLIST(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII SLIST file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the headers. This is most
        useful for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/slist.ascii')
    """
    with open(filename, 'rt') as fh:
        # read file and split text into channels
        buf = []
        key = False
        for line in fh:
            if line.isspace():
                # blank line
                continue
            elif line.startswith('TIMESERIES'):
                # new header line
                key = True
                buf.append((line, StringIO()))
            elif headonly:
                # skip data for option headonly
                continue
            elif key:
                # data entry - may be written in multiple columns
                buf[-1][1].write(line.strip() + ' ')
    # create ObsPy stream object
    stream = Stream()
    for header, data in buf:
        # create Stats
        stats = Stats()
        parts = header.replace(',', '').split()
        temp = parts[1].split('_')
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({'dataquality': temp[4]})
        stats.ascii = AttribDict({'unit': parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
#****************************************************************************
# data description
#****************************************************************************
NR = 50      # number of receivers
LEN = 10000  # data length (alternative value: 8000)
comp1 = 'Uz'

# read Heidimode data
dz_src = path_in + '/arbzseis'
dz_data = np.fromfile(dz_src, dtype='>f')  # big endian float (4 bytes)
dz_data = dz_data.reshape((NR, LEN), order="F")
dz_data = np.float32(dz_data)

# write SU-format binary
dz_dest = path_out + '/' + comp1 + '_file_single.su'

stats = Stats()
stats.filename = dz_dest
stats.starttime = 0.0
stats.delta = 1.0e-4  # ObsPy does not work with very small timesteps (10.0e-8)
stats.npts = LEN

stream = Stream()
for i in range(NR):
    stream.append(Trace(data=dz_data[i, :], header=stats))
    # stream.append(Trace(data=dz_data[i, :]))
    print('max of trace %i is %f' % (i, np.max(dz_data[i, :])))

print(stream)
# stream[0].plot()
stream.write(dz_dest, format='su')