def main(sgy_file, station_file, output_dir_base):
    """Split a 3-component SEG-Y file into per-component day streams.

    Iterates over the traces of *sgy_file*, dispatching each trace to one of
    three component streams based on its trace sequence number (1=DPZ, 2=DP1,
    3=DP2) and handing it to ``add_trace`` for day-boundary merging/writing.

    Parameters
    ----------
    sgy_file : str
        Path to the input SEG-Y file.
    station_file : str
        Station metadata file forwarded to ``add_trace``.
    output_dir_base : str
        Base directory for the output, forwarded to ``add_trace``.

    Raises
    ------
    ValueError
        If the file contains no traces, or a trace carries a sequence
        number other than 1, 2 or 3.
    """
    print('PROCESSING segy file ' + sgy_file)

    # Find the starting date of the SEGY file from the first trace.
    previous_date1 = None
    for trace in iread_segy(sgy_file):
        previous_date1 = trace.stats.endtime.date
        break
    if previous_date1 is None:
        # Guard: the original code raised an incidental NameError here.
        raise ValueError('SEG-Y file contains no traces: ' + sgy_file)
    previous_date2 = previous_date1
    previous_date3 = previous_date1

    # One running day-stream per component.
    comp1 = Stream()
    comp2 = Stream()
    comp3 = Stream()

    # Loop over traces and merge them into their component's day stream.
    for trace in iread_segy(sgy_file):
        # The channel is encoded in the trace sequence number within line.
        current_comp = \
            trace.stats.segy.trace_header.trace_sequence_number_within_line

        # If there are any gaps, replace the masked samples.
        # NOTE(review): .filled() uses the masked array's fill_value, not 0 —
        # confirm that matches the intended "fill with 0's" behavior.
        if isinstance(trace.data, np.ma.masked_array):
            trace.data = trace.data.filled()

        if current_comp == 1:  # 1st component: DPZ
            comp1, previous_date1 = add_trace(trace, day_stream=comp1,
                                              previous_date=previous_date1,
                                              station_file=station_file,
                                              segy_filepath=sgy_file,
                                              output_dir=output_dir_base)
        elif current_comp == 2:  # 2nd component: DP1
            comp2, previous_date2 = add_trace(trace, day_stream=comp2,
                                              previous_date=previous_date2,
                                              station_file=station_file,
                                              segy_filepath=sgy_file,
                                              output_dir=output_dir_base)
        elif current_comp == 3:  # 3rd component: DP2
            comp3, previous_date3 = add_trace(trace, day_stream=comp3,
                                              previous_date=previous_date3,
                                              station_file=station_file,
                                              segy_filepath=sgy_file,
                                              output_dir=output_dir_base)
        else:
            # ValueError is the conventional type for a bad data value
            # (the original misused NameError, which is reserved for
            # undefined identifiers).
            raise ValueError('Unrecognized sequence number')
def findMinBound():
    """Log the northing of every grid position lying within 25 m of minNorthing.

    Walks the traces of the global ``segyFile``, converting each trace index
    into grid coordinates via the global ``colCount`` / ``xySection`` spacing.
    """
    for idx, _tr in enumerate(iread_segy(segyFile)):
        # Convert the flat trace index into (row, column) grid position.
        row, col = divmod(idx, colCount)
        easting = xStart + xySection * col
        northing = yStart + xySection * row
        # Report positions close to the minimum northing bound.
        if math.fabs(northing - minNorthing) < 25:
            logging.info(str(northing))
def createTraceCollection():
    """Rebuild the ``trace`` collection: one document per SEG-Y trace.

    Drops the existing collection, then stores each trace's grid coordinates
    (derived from its flat index and the global ``yDepth`` column count) and
    its full sample vector.
    """
    trace.drop()
    for idx, tr in enumerate(iread_segy(segyFile)):
        logging.info(idx)
        # Flat index -> (row, column) on the survey grid.
        row, col = divmod(idx, yDepth)
        document = {
            "x": xStart + xySection * col,
            "y": yStart + xySection * row,
            "z": tr.data.tolist(),
        }
        trace.insert_one(document)
def test_iterative_reading(self):
    """
    Tests iterative reading: iread_segy must yield the same traces as a
    normal obspy.read of the whole file.
    """
    # Read normally.
    filename = os.path.join(self.path, 'example.y_first_trace')
    st = obspy.read(filename, unpack_trace_headers=True)
    # Read iteratively.  list() instead of a pass-through comprehension.
    ist = list(iread_segy(filename, unpack_headers=True))
    # iread_segy attaches file-level metadata to the first trace that
    # obspy.read does not; strip it so the traces compare equal.
    del ist[0].stats.segy.textual_file_header
    del ist[0].stats.segy.binary_file_header
    del ist[0].stats.segy.textual_file_header_encoding
    del ist[0].stats.segy.data_encoding
    del ist[0].stats.segy.endian
    self.assertEqual(st.traces, ist)
def test_iterative_reading(self):
    """
    Tests iterative reading: trace-by-trace reading must match a full read.
    """
    # Read the whole file at once.
    filename = os.path.join(self.path, 'example.y_first_trace')
    st = obspy.read(filename, unpack_trace_headers=True)
    # Read trace by trace.
    ist = [tr for tr in iread_segy(filename, unpack_headers=True)]
    # The first iterated trace carries file-level metadata that the normal
    # read does not attach; remove it before comparing.
    first_segy_stats = ist[0].stats.segy
    for attr in ('textual_file_header', 'binary_file_header',
                 'textual_file_header_encoding', 'data_encoding', 'endian'):
        delattr(first_segy_stats, attr)
    self.assertEqual(st.traces, ist)
def createZCollection():
    """Build one document per depth level in ``zCollection``.

    Processes depth levels in batches of 50 so the SEG-Y file is re-read
    once per batch instead of once per level.  For each level the document
    holds a 2-D array ``z[row][col]`` of samples plus the level index.
    """
    batchSize = 50
    for level in range(0, zDepth, batchSize):
        # The last batch may be short when zDepth is not a multiple of
        # batchSize; clamp to avoid indexing past the end of tr.data.
        n_levels = min(batchSize, zDepth - level)
        # One 2-D accumulator (rows of columns) per level in this batch.
        zArrays = [[] for _ in range(n_levels)]
        cur = time.time()
        logging.info(level)
        for index, tr in enumerate(iread_segy(segyFile)):
            rowNumber = index // yDepth
            colNumber = index % yDepth
            # A new row starts whenever the column index wraps to 0.
            if colNumber == 0:
                for arr in zArrays:
                    arr.append([])
            for i in range(n_levels):
                zArrays[i][rowNumber].append(float(tr.data[level + i]))
        logging.info(time.time() - cur)
        for i in range(n_levels):
            zCollection.insert_one({"z": zArrays[i], "level": level + i})
from itertools import islice
from obspy.io.segy.segy import iread_segy
import csv
from global_variable import *
from pymongo import MongoClient
import logging
import pymongo
import time

# Scan every trace of the SEG-Y file and report the global sample range.
# Seed the extrema from the data itself rather than 0: starting at 0 is
# wrong for all-positive data (minimum sticks at 0) and all-negative data
# (maximum sticks at 0).  Also avoid shadowing the builtins min/max.
min_val = None
max_val = None
for tr in iread_segy(segyFile):
    # tr.data is a numpy array, so .min()/.max() scan each trace at C speed.
    lo = float(tr.data.min())
    hi = float(tr.data.max())
    if min_val is None or lo < min_val:
        min_val = lo
    if max_val is None or hi > max_val:
        max_val = hi
print(min_val, max_val)