traceE = Trace(az)

if PlotUnit == 'VEL':  # integrate acceleration if plotting velocity
    traceN.integrate(method='cumtrapz')
    traceE.integrate(method='cumtrapz')

plottheta = theta[thetacount] * 180 / np.pi
thetacount = thetacount + 1

# store stats
stationname = sta + '%04d' % i
channelnameN = cha + 'N'
channelnameE = cha + 'E'

# for NS components
statsN = Stats()
statsN.sampling_rate = 1.0 / sampling_rate_x  # sampling_rate_x is the sample interval in seconds
statsN.delta = sampling_rate_x
statsN.starttime = starttime
statsN.npts = len(traceN.data)
statsN.network = net
statsN.station = stationname
statsN.location = ''
statsN.channel = channelnameN

traceN.stats = statsN
traceN.stats.sac = obspy.core.AttribDict()
traceN.stats.sac.back_azimuth = plottheta  # use this as azimuth of station

# ---applying filters---
traceN.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
tN = traceN.stats.starttime
traceN.trim(starttime=tN, endtime=tN + trim_end_time)
traceN.taper(0.05, side='right')
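# The EW component presumably gets the same header, filter, trim, and taper
# treatment before the pair of traces is plotted or written out. A minimal
# sketch of that continuation, assuming the variables above are still in
# scope; statsE and tE are hypothetical names introduced here.
import copy

statsE = copy.deepcopy(statsN)
statsE.channel = channelnameE
traceE.stats = statsE
traceE.stats.sac = obspy.core.AttribDict()
traceE.stats.sac.back_azimuth = plottheta  # same station azimuth as the NS trace

traceE.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
tE = traceE.stats.starttime
traceE.trim(starttime=tE, endtime=tE + trim_end_time)
traceE.taper(0.05, side='right')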
def read_TEXCEL_CSV(filename, **kwargs):
    """
    Reads a texcel csv file and returns a uquake Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        uquake :func:`~uquake.core.stream.read` function, call this instead.

    :param filename: the path to the file
    :param kwargs:
    :return: ~uquake.core.stream.Stream
    """

    with open(filename) as fle:
        x = []
        y = []
        z = []

        for k, line in enumerate(fle):
            if k == 0:
                if 'MICROPHONE' in line:
                    offset = 9
                else:
                    offset = 8

            # header
            if k < 2:
                continue

            val = line.strip().split(',')

            # relative time
            if k == 3:
                rt0 = timedelta(seconds=float(val[0]))
            elif k == 6:
                station = str(eval(val[offset]))
            elif k == 7:
                date = val[offset]
            elif k == 8:
                date_time = date + " " + val[offset]
                datetime = parse(date_time)
                starttime = datetime + rt0
            elif k == 9:
                site = val[offset]
            elif k == 10:
                location = val[offset]
            elif k == 17:
                sensitivity_x = float(val[offset])
                sensitivity_y = float(val[offset + 1])
                sensitivity_z = float(val[offset + 2])
            elif k == 18:
                range_x = float(val[offset])
                range_y = float(val[offset + 1])
                range_z = float(val[offset + 2])
            elif k == 19:
                trigger_x = float(val[offset])
                trigger_y = float(val[offset + 1])
                trigger_z = float(val[offset + 2])
            elif k == 20:
                si_x = float(val[offset])
                si_y = float(val[offset + 1])
                si_z = float(val[offset + 2])
            elif k == 21:
                sr_x = float(val[offset])
                sr_y = float(val[offset + 1])
                sr_z = float(val[offset + 2])

            x.append(float(val[1]))
            y.append(float(val[2]))
            z.append(float(val[3]))

    x = np.array(x)
    y = np.array(y)
    z = np.array(z)

    stats = Stats()
    stats.network = site
    stats.delta = si_x / 1000.0
    stats.npts = len(x)
    stats.location = location
    stats.station = station
    stats.starttime = UTCDateTime(starttime)
    stats.channel = 'radial'
    tr_x = Trace(data=x / 1000.0, header=stats)

    stats.delta = si_y / 1000.0
    stats.channel = 'transverse'
    tr_y = Trace(data=y / 1000.0, header=stats)

    stats.delta = si_z / 1000.0
    stats.channel = 'vertical'
    tr_z = Trace(data=z / 1000.0, header=stats)

    return Stream(traces=[tr_x, tr_y, tr_z])
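# A minimal usage sketch. The reader above is not meant to be called directly;
# it is picked up by uquake's plugin machinery and driven through
# uquake.core.stream.read(). The format name 'TEXCEL_CSV' is inferred from the
# function name, and the file name below is illustrative, not taken from the
# source.
from uquake.core.stream import read

st = read('blast_record.csv', format='TEXCEL_CSV')  # hypothetical file
for tr in st:
    # one trace per component: 'radial', 'transverse', 'vertical'
    print(tr.stats.station, tr.stats.channel, tr.stats.npts, tr.stats.delta)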
def parseISF(self, isf_data, header_only=None, convert=None):
    """
    Determine starting point of data.

    Header has something like 'CURVE #520000', where ascii '5' is the length
    of the length field '20000'.

    ::

        'CURVE #520000xxxx...'
         ^      ^^    ^
         |      ||    +---- data_loc:    location of first valid byte of data
         |      |+--------- len_loc:     location of first byte of data length field
         |      +---------- len_len_loc: location of length of length
         +----------------- tag_loc:     location of start tag
    """
    start_tag = 'CURVE #'
    tag_loc = int(isf_data.find(start_tag))
    len_len_loc = tag_loc + len(start_tag)
    len_loc = len_len_loc + 1
    len_len = int(isf_data[len_len_loc])  # e.g. 5
    data_loc = len_loc + len_len
    data_len = int(isf_data[len_loc:len_loc + len_len])  # e.g. 20000

    # Extract and parse header
    header = isf_data[:tag_loc]

    # Reformat the header into a dictionary
    header_dict = {}
    items = header.replace(':WFMPRE:', '').replace(':', '').split(';')  # list of "key value" pairs

    for item in items:
        if item:
            key, value = item.split(' ', 1)  # maxsplit 1 to ignore subsequent spaces in value
            value = value.replace('"', '')
            header_dict[key] = value

    if header_only:
        return header_dict

    # Extract data and convert from string to integer value
    data = isf_data[data_loc:]

    stats = Stats()
    stats.npts = int(header_dict['NR_PT'])
    stats.calib = float(header_dict['YMULT'])

    byte_order = header_dict['BYT_OR']  # 'MSB' or 'LSB'
    if byte_order == 'MSB':
        byte_order = '>'
    else:
        byte_order = '<'

    points = []
    for i in range(0, stats.npts * 2, 2):
        value = data[i:i + 2]  # two-byte sample as string
        converted = struct.unpack('%sh' % byte_order, value)[0]
        points.append(converted)

    # Optionally convert points to engineering units
    if convert:
        try:
            points = np.array(points) * stats.calib  # requires numpy
        except NameError:
            # If numpy is not available, use a list instead.
            p = []
            for point in points:
                p.append(point * stats.calib)
            points = p

    stats.time_offset = float(header_dict['XZERO'])
    stats.calib_unit = header_dict['YUNIT']
    stats.delta = float(header_dict['XINCR'])
    stats.amp_offset = float(header_dict['YOFF'])
    stats.comments = header_dict['WFID']
    stats.channel = header_dict['WFID'][0:3]

    return stats, points
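# A minimal driving sketch for parseISF. The parser indexes the raw buffer as
# an 8-bit string (Python 2 style), so the file should be read in binary mode;
# the owning class name ISFReader and the file name are hypothetical,
# introduced only for this example.
with open('TEK00000.ISF', 'rb') as f:
    isf_data = f.read()

reader = ISFReader()  # hypothetical class that provides parseISF()

# header_only=True returns just the parsed WFMPRE header dictionary
hdr = reader.parseISF(isf_data, header_only=True)
print(hdr['NR_PT'], hdr['XINCR'], hdr['YUNIT'])

# convert=True scales the raw integer samples by YMULT into engineering units
stats, points = reader.parseISF(isf_data, convert=True)
print(stats.channel, stats.delta, len(points))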