Example #1
 def test_readingAndWritingDifferentDataEncodings(self):
     """
     Writes and reads different data encodings and checks if the data
     remains the same.
     """
     # The file uses IBM data encoding.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     data = st[0].data
     # All working encodings with corresponding dtypes.
     encodings = {1: 'float32', 2: 'int32', 3: 'int16', 5: 'float32'}
     out_file = NamedTemporaryFile().name
     # Loop over all encodings.
     for data_encoding, dtype in encodings.items():
         this_data = np.require(data.copy(), dtype)
         st[0].data = this_data
         writeSEGY(st, out_file, data_encoding=data_encoding)
         # Read again and compare data.
         this_stream = readSEGY(out_file)
         os.remove(out_file)
         # Both should now be equal. Usually converting from IBM to IEEE
         # floating point numbers might result in small rounding errors but
         # in this case it seems to work. Might be different on different
         # computers.
         np.testing.assert_array_equal(this_data, this_stream[0].data)
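The encoding-to-dtype mapping used above follows the SEG Y data sample format codes: 1 is 4-byte IBM float, 2 is 4-byte integer, 3 is 2-byte integer and 5 is 4-byte IEEE float. A minimal helper along these lines (hypothetical, not part of obspy) makes the required cast explicit before writing:

    import numpy as np

    # SEG Y data sample format codes and the numpy dtypes obspy expects for
    # them (hypothetical helper mirroring the mapping used in the test above).
    SEGY_DTYPES = {1: 'float32', 2: 'int32', 3: 'int16', 5: 'float32'}

    def coerce_for_encoding(data, data_encoding):
        """Return a copy of ``data`` cast to the dtype the encoding needs."""
        try:
            dtype = SEGY_DTYPES[data_encoding]
        except KeyError:
            raise ValueError('unsupported SEG Y data encoding: %r'
                             % data_encoding)
        return np.require(data.copy(), dtype)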
Example #2
 def test_enforcingTextualHeaderEncodingWhileReading(self):
     """
     Tests whether or not the enforcing of the encoding of the textual file
     header actually works.
     """
     # File ld0042_file_00018.sgy_first_trace has an EBCDIC encoding.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     # Read once with EBCDIC encoding and check if it is correct.
     st1 = readSEGY(file, textual_header_encoding='EBCDIC')
     self.assertTrue(st1.stats.textual_file_header[3:21]
                     == 'CLIENT: LITHOPROBE')
     # This should also be written to the stats dictionary.
     self.assertEqual(st1.stats.textual_file_header_encoding,
                      'EBCDIC')
     # Reading again with ASCII should yield bad results. Lowercase keyword
     # argument should also work.
     st2 = readSEGY(file, textual_header_encoding='ascii')
     self.assertFalse(st2.stats.textual_file_header[3:21]
                      == 'CLIENT: LITHOPROBE')
     self.assertEqual(st2.stats.textual_file_header_encoding,
                      'ASCII')
     # Autodetection should also write the textual file header encoding to the
     # stats dictionary.
     st3 = readSEGY(file)
     self.assertEqual(st3.stats.textual_file_header_encoding,
                      'EBCDIC')
Example #3
 def test_enforcingTextualHeaderEncodingWhileReading(self):
     """
     Tests whether or not the enforcing of the encoding of the textual file
     header actually works.
     """
     # File ld0042_file_00018.sgy_first_trace has an EBCDIC encoding.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     # Read once with EBCDIC encoding and check if it is correct.
     st1 = readSEGY(file, textual_header_encoding='EBCDIC')
     self.assertTrue(st1.stats.textual_file_header[3:21] \
                     == 'CLIENT: LITHOPROBE')
     # This should also be written to the stats dictionary.
     self.assertEqual(st1.stats.textual_file_header_encoding,
                      'EBCDIC')
     # Reading again with ASCII should yield bad results. Lowercase keyword
     # argument should also work.
     st2 = readSEGY(file, textual_header_encoding='ascii')
     self.assertFalse(st2.stats.textual_file_header[3:21] \
                     == 'CLIENT: LITHOPROBE')
     self.assertEqual(st2.stats.textual_file_header_encoding,
                      'ASCII')
     # Autodetection should also write the textual file header encoding to the
     # stats dictionary.
     st3 = readSEGY(file)
     self.assertEqual(st3.stats.textual_file_header_encoding,
                      'EBCDIC')
Example #4
 def test_readingAndWritingDifferentDataEncodings(self):
     """
     Writes and reads different data encodings and checks if the data
     remains the same.
     """
     # The file uses IBM data encoding.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     data = st[0].data
     # All working encodings with corresponding dtypes.
     encodings = {1: 'float32',
                  2: 'int32',
                  3: 'int16',
                  5: 'float32'}
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         # Loop over all encodings.
         for data_encoding, dtype in encodings.items():
             this_data = np.require(data.copy(), dtype)
             st[0].data = this_data
             writeSEGY(st, out_file, data_encoding=data_encoding)
             # Read again and compare data.
             this_stream = readSEGY(out_file)
             # Both should now be equal. Usually converting from IBM to IEEE
             # floating point numbers might result in small rounding errors
             # but in this case it seems to work. Might be different on
             # different computers.
             np.testing.assert_array_equal(this_data, this_stream[0].data)
Example #5
    def test_TwoDigitYearsSEGY(self):
        """
        Even though not specified in the 1975 SEG Y rev 1 standard, 2 digit
        years should be read correctly. Some programs produce them.

        Every two digit year < 30 will be mapped to 2000-2029 and every two
        digit year >= 30 and < 100 will be mapped to 1930-1999.
        """
        # Read two artificial test files and check the years.
        filename = os.path.join(self.path, 'one_trace_year_11.sgy')
        st = readSEGY(filename)
        self.assertEqual(2011, st[0].stats.starttime.year)
        filename = os.path.join(self.path, 'one_trace_year_99.sgy')
        st = readSEGY(filename)
        self.assertEqual(1999, st[0].stats.starttime.year)
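The mapping described in the docstring is simple enough to state directly; a sketch of the rule (an illustration, not obspy's actual implementation):

    def expand_two_digit_year(year):
        """Map two-digit years: 0-29 -> 2000-2029, 30-99 -> 1930-1999.
        Four-digit years pass through unchanged."""
        if 0 <= year < 30:
            return 2000 + year
        if 30 <= year < 100:
            return 1900 + year
        return year

    assert expand_two_digit_year(11) == 2011
    assert expand_two_digit_year(99) == 1999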
Example #6
 def test_writingNewSamplingRate(self):
     """
     Setting a new sample rate works.
     """
     file = os.path.join(self.path, '1.sgy_first_trace')
     segy = readSEGY(file)
     segy[0].stats.sampling_rate = 20
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         writeSEGY(segy, outfile)
         new_segy = readSEGY(outfile)
     self.assertEqual(new_segy[0].stats.sampling_rate, 20)
     # The same with the Seismic Unix file.
     file = os.path.join(self.path, '1.su_first_trace')
     _su = readSU(file)
Example #7
 def test_writingNewSamplingRate(self):
     """
     Setting a new sample rate works.
     """
     file = os.path.join(self.path, '1.sgy_first_trace')
     segy = readSEGY(file)
     segy[0].stats.sampling_rate = 20
     outfile = NamedTemporaryFile().name
     writeSEGY(segy, outfile)
     new_segy = readSEGY(outfile)
     os.remove(outfile)
     self.assertEqual(new_segy[0].stats.sampling_rate, 20)
     # The same with the Seismic Unix file.
     file = os.path.join(self.path, '1.su_first_trace')
     _su = readSU(file)
Example #8
    def test_TwoDigitYearsSEGY(self):
        """
        Even though not specified in the 1975 SEG Y rev 1 standard, 2 digit
        years should be read correctly. Some programs produce them.

        Every two digit year < 30 will be mapped to 2000-2029 and every two
        digit year >= 30 and < 100 will be mapped to 1930-1999.
        """
        # Read two artificial test files and check the years.
        filename = os.path.join(self.path, 'one_trace_year_11.sgy')
        st = readSEGY(filename)
        self.assertEqual(2011, st[0].stats.starttime.year)
        filename = os.path.join(self.path, 'one_trace_year_99.sgy')
        st = readSEGY(filename)
        self.assertEqual(1999, st[0].stats.starttime.year)
Example #9
def SEGYcoordinatesfix(segyfilename):
    section = readSEGY(segyfilename, unpack_trace_headers=True)
    # collect only the non-repeated coordinate values sx, sy
    ntr = len(section) # number of traces
    sx = numpy.zeros(1)
    sy = numpy.zeros(1)
    trc = numpy.zeros(1) # trace index of not duplicated traces
    cdpx = numpy.zeros(ntr) # original x coordinate
    cdpy = numpy.zeros(ntr) # original y coordinate
    # bytes (181, 185) (cdpx, cdpy) (first point always in)
    cdpx[0] = section[0].stats.segy.trace_header.x_coordinate_of_ensemble_position_of_this_trace
    cdpy[0] = section[0].stats.segy.trace_header.y_coordinate_of_ensemble_position_of_this_trace
    sx[0] = cdpx[0]
    sy[0] = cdpy[0]
    trc[0] = 0
    for i in numpy.arange(1, ntr):  # keep only the non-duplicated coordinates
        cdpx[i] = section[i].stats.segy.trace_header.x_coordinate_of_ensemble_position_of_this_trace
        cdpy[i] = section[i].stats.segy.trace_header.y_coordinate_of_ensemble_position_of_this_trace
        if (cdpx[i] != cdpx[i-1]) or (cdpy[i] != cdpy[i-1]):  # skip consecutive duplicated (x, y) pairs
            sx = numpy.append(sx, cdpx[i])
            sy = numpy.append(sy, cdpy[i])
            trc = numpy.append(trc, i)
    # trc: indices of the non-duplicated traces (the interpolation abscissa)
    # sx, sy: the corresponding non-duplicated coordinates
    # Degree-1 splines are linear interpolators on the trace index; unlike
    # numpy.interp they also extrapolate, with the bbox extending the range
    # by a few traces before and after the section.
    flinearsx = InterpolatedUnivariateSpline(trc, sx, bbox=[-3, ntr+2], k=1)
    flinearsy = InterpolatedUnivariateSpline(trc, sy, bbox=[-3, ntr+2], k=1)
    for trace_index in numpy.arange(0, ntr, 1): # interpolate for all trace indexes, changing the trace headers on bytes (73, 77)
        section[trace_index].stats.segy.trace_header.source_coordinate_x = int(flinearsx(trace_index))
        section[trace_index].stats.segy.trace_header.source_coordinate_y = int(flinearsy(trace_index))
    fileName, fileExtension = os.path.splitext(segyfilename)
    section.write(fileName+'fixed.segy', format='SEGY')
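For completeness, the imports this function relies on are roughly the following (the module paths are assumptions based on the legacy obspy.segy API used throughout these examples), and a call just takes the SEG Y file name:

    import os
    import numpy
    from scipy.interpolate import InterpolatedUnivariateSpline
    from obspy.segy.core import readSEGY  # legacy obspy.segy API (assumed)

    # Rewrites source_coordinate_x/y by linear interpolation of the
    # de-duplicated ensemble coordinates and writes '<name>fixed.segy'.
    SEGYcoordinatesfix('line_001.segy')  # hypothetical file name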
Example #10
 def test_largeSampleRateIntervalRaises(self):
     """
     SEG Y supports a sample interval from 1 to 65535 microseconds in steps
     of 1 microsecond. Larger intervals cannot be supported due to the
     definition of the SEG Y format. Therefore the smallest possible
     sampling rate is ~ 15.26 Hz.
     """
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         # Test for SEG Y.
         file = os.path.join(self.path, '1.sgy_first_trace')
         segy = readSEGY(file)
         # Set the largest possible delta value which should just work.
         segy[0].stats.delta = 0.065535
         writeSEGY(segy, outfile)
         # Slightly larger should raise.
         segy[0].stats.delta = 0.065536
         self.assertRaises(SEGYSampleIntervalError, writeSEGY, segy,
                           outfile)
         # Same for SU.
         file = os.path.join(self.path, '1.su_first_trace')
         su = readSU(file)
         # Set the largest possible delta value which should just work.
         su[0].stats.delta = 0.065535
         writeSU(su, outfile)
     # Slightly larger should raise.
     su[0].stats.delta = 0.065536
     self.assertRaises(SEGYSampleIntervalError, writeSU, su, outfile)
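The ~15.26 Hz figure in the docstring follows directly from the 16-bit sample interval field, which counts microseconds; a quick check:

    # The sample interval field holds at most 65535 microseconds, so the
    # largest delta is 0.065535 s and the smallest sampling rate its inverse.
    max_delta = 65535 * 1e-6
    min_sampling_rate = 1.0 / max_delta
    print(round(min_sampling_rate, 2))  # ~15.26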
Example #11
 def test_writingUsingCore(self):
     """
     Tests the writing of SEGY rev1 files using obspy.core. It just compares
     the output of writing using obspy.core with the output of writing the
     files using the internal SEGY object which is thoroughly tested in
     obspy.segy.tests.test_segy.
     """
     for file, _ in self.files.items():
         file = os.path.join(self.path, file)
         # Read the file with the internal SEGY representation.
         segy_file = readSEGYInternal(file)
         # Read again using core.
         st = readSEGY(file)
         # Create two temporary files to write to.
         with NamedTemporaryFile() as tf1:
             out_file1 = tf1.name
             with NamedTemporaryFile() as tf2:
                 out_file2 = tf2.name
                 # Write twice.
                 segy_file.write(out_file1)
                 writeSEGY(st, out_file2)
                 # Read and delete files.
                 with open(out_file1, 'rb') as f1:
                     data1 = f1.read()
                 with open(out_file2, 'rb') as f2:
                     data2 = f2.read()
         # Test if they are equal.
         self.assertEqual(data1[3200:3600], data2[3200:3600])
Example #12
 def test_largeSampleRateIntervalRaises(self):
     """
     SEG Y supports a sample interval from 1 to 65535 microseconds in steps
     of 1 microsecond. Larger intervals cannot be supported due to the
     definition of the SEG Y format. Therefore the smallest possible
     sampling rate is ~ 15.26 Hz.
     """
     outfile = NamedTemporaryFile().name
     # Test for SEG Y.
     file = os.path.join(self.path, '1.sgy_first_trace')
     segy = readSEGY(file)
     # Set the largest possible delta value which should just work.
     segy[0].stats.delta = 0.065535
     writeSEGY(segy, outfile)
     os.remove(outfile)
     # Slightly larger should raise.
     segy[0].stats.delta = 0.065536
     self.assertRaises(SEGYSampleIntervalError, writeSEGY, segy, outfile)
     # Same for SU.
     file = os.path.join(self.path, '1.su_first_trace')
     su = readSU(file)
     # Set the largest possible delta value which should just work.
     su[0].stats.delta = 0.065535
     writeSU(su, outfile)
     os.remove(outfile)
     # Slightly larger should raise.
     su[0].stats.delta = 0.065536
     self.assertRaises(SEGYSampleIntervalError, writeSU, su, outfile)
Example #13
 def test_readingUsingCore(self):
     """
     This test checks whether or not all necessary information is read
     during reading with core. It actually just assumes the internal
     SEGYFile object, which is thoroughly tested in
     obspy.segy.tests.test_segy, is correct and compares all values to it.
     This seems to be the easiest way to test everything.
     """
     for file, _ in self.files.items():
         file = os.path.join(self.path, file)
         # Read the file with the internal SEGY representation.
         segy_file = readSEGYInternal(file)
         # Read again using core.
         st = readSEGY(file)
         # They all should have length one because all additional traces
         # have been removed.
         self.assertEqual(len(st), 1)
         # Assert the data is the same.
         np.testing.assert_array_equal(segy_file.traces[0].data, st[0].data)
         # Textual header.
         self.assertEqual(segy_file.textual_file_header, st.stats.textual_file_header)
         # Textual_header_encoding.
         self.assertEqual(segy_file.textual_header_encoding, st.stats.textual_file_header_encoding)
         # Endianness.
         self.assertEqual(segy_file.endian, st.stats.endian)
         # Data encoding.
         self.assertEqual(segy_file.data_encoding, st.stats.data_encoding)
         # Test the file and trace binary headers.
         for key, value in segy_file.binary_file_header.__dict__.items():
             self.assertEqual(getattr(st.stats.binary_file_header, key), value)
         for key, value in segy_file.traces[0].header.__dict__.items():
             self.assertEqual(getattr(st[0].stats.segy.trace_header, key), value)
Example #14
 def test_settingDataEncodingWorks(self):
     """
     Tests whether or not enforcing the data encoding works.
     """
     # File ld0042_file_00018.sgy_first_trace uses IBM floating point
     # representation.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     # First test if it even works.
     out_file = NamedTemporaryFile().name
     writeSEGY(st, out_file)
     with open(out_file, 'rb') as f:
         data1 = f.read()
     os.remove(out_file)
     # Write again and enforce encoding one which should yield the same
     # result.
     writeSEGY(st, out_file, data_encoding=1)
     with open(out_file, 'rb') as f:
         data2 = f.read()
     os.remove(out_file)
     self.assertTrue(data1 == data2)
     # Writing IEEE floats which should not require any dtype changes.
     writeSEGY(st, out_file, data_encoding=5)
     with open(out_file, 'rb') as f:
         data3 = f.read()
     os.remove(out_file)
     self.assertFalse(data1 == data3)
Example #15
 def test_writingUsingCore(self):
     """
     Tests the writing of SEGY rev1 files using obspy.core. It just compares
     the output of writing using obspy.core with the output of writing the
     files using the internal SEGY object which is thoroughly tested in
     obspy.segy.tests.test_segy.
     """
     for file, _ in self.files.items():
         file = os.path.join(self.path, file)
         # Read the file with the internal SEGY representation.
         segy_file = readSEGYInternal(file)
         # Read again using core.
         st = readSEGY(file)
         # Create two temporary files to write to.
         out_file1 = NamedTemporaryFile().name
         out_file2 = NamedTemporaryFile().name
         # Write twice.
         segy_file.write(out_file1)
         writeSEGY(st, out_file2)
         # Read and delete files.
         with open(out_file1, 'rb') as f1:
             data1 = f1.read()
         with open(out_file2, 'rb') as f2:
             data2 = f2.read()
         os.remove(out_file1)
         os.remove(out_file2)
         # Test if they are equal.
         self.assertEqual(data1[3200:3600], data2[3200:3600])
Example #16
 def test_settingDataEncodingWorks(self):
     """
     Tests whether or not enforcing the data encoding works.
     """
     # File ld0042_file_00018.sgy_first_trace uses IBM floating point
     # representation.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     # First test if it even works.
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         writeSEGY(st, out_file)
         with open(out_file, 'rb') as f:
             data1 = f.read()
         # Write again and enforce encoding one which should yield the same
         # result.
         writeSEGY(st, out_file, data_encoding=1)
         with open(out_file, 'rb') as f:
             data2 = f.read()
         self.assertTrue(data1 == data2)
         # Writing IEEE floats which should not require any dtype changes.
         writeSEGY(st, out_file, data_encoding=5)
         with open(out_file, 'rb') as f:
             data3 = f.read()
         self.assertFalse(data1 == data3)
Example #17
 def test_enforcingEndiannessWhileReading(self):
     """
     Tests whether or not enforcing the endianness while reading a file
     works. It will actually just deactivate the autodetection in case it
     produced a wrong result. Using a wrong endianness while reading a file
     will still produce an error because the data format will most likely be
     wrong and therefore obspy.segy cannot unpack the data.
     """
     # File ld0042_file_00018.sgy_first_trace is in big endian.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     # This should work and write big endian to the stats dictionary.
     st1 = readSEGY(file)
     self.assertEqual(st1.stats.endian, '>')
     # Doing the same with the right endianness should still work.
     st2 = readSEGY(file, byteorder='>')
     self.assertEqual(st2.stats.endian, '>')
     # The wrong endianness should yield a KeyError because the routine to
     # unpack the wrong data format code cannot be found.
     self.assertRaises(KeyError, readSEGY, file, byteorder='<')
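The KeyError comes from the 16-bit data sample format code at bytes 3225-3226 of the file: read in the wrong byte order it is usually not one of the defined codes, so the lookup of the unpack routine fails. A standalone sketch of that check with plain struct (independent of obspy):

    import struct

    def data_format_code_both_orders(filename):
        """Return the data sample format code read as big and little endian.
        Normally only one of the two is a defined SEG Y code (1-8)."""
        with open(filename, 'rb') as f:
            f.seek(3224)  # 3200-byte textual header + 24 bytes of binary header
            raw = f.read(2)
        return struct.unpack('>h', raw)[0], struct.unpack('<h', raw)[0]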
Example #18
 def test_invalidDataEncodingRaises(self):
     """
     Using an invalid data encoding raises an error.
     """
     file = os.path.join(self.path, "ld0042_file_00018.sgy_first_trace")
     st = readSEGY(file)
     out_file = NamedTemporaryFile().name
     self.assertRaises(SEGYCoreWritingError, writeSEGY, st, out_file, data_encoding=0)
     self.assertRaises(SEGYCoreWritingError, writeSEGY, st, out_file, data_encoding="")
     os.remove(out_file)
Example #19
 def test_enforcingEndiannessWhileReading(self):
     """
     Tests whether or not enforcing the endianness while reading a file
     works. It will actually just deactivate the autodetection in case it
     produced a wrong result. Using a wrong endianness while reading a file
     will still produce an error because the data format will most likely be
     wrong and therefore obspy.segy cannot unpack the data.
     """
     # File ld0042_file_00018.sgy_first_trace is in big endian.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     # This should work and write big endian to the stats dictionary.
     st1 = readSEGY(file)
     self.assertEqual(st1.stats.endian, '>')
     # Doing the same with the right endianness should still work.
     st2 = readSEGY(file, byteorder='>')
     self.assertEqual(st2.stats.endian, '>')
     # The wrong endianness should yield a KeyError because the routine to
     # unpack the wrong data format code cannot be found.
     self.assertRaises(KeyError, readSEGY, file, byteorder='<')
Example #20
 def test_enforcingTextualHeaderEncodingWhileWriting(self):
     """
     Tests whether or not enforcing the encoding of the textual file header
     while writing works.
     """
     # File ld0042_file_00018.sgy_first_trace has an EBCDIC encoding.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st1 = readSEGY(file)
     # Save the header to compare it later on.
     with open(file, 'rb') as f:
         header = f.read(3200)
     # First write should remain EBCDIC.
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         writeSEGY(st1, out_file)
         st2 = readSEGY(out_file)
         # Compare header.
         with open(out_file, 'rb') as f:
             new_header = f.read(3200)
     self.assertTrue(header == new_header)
     self.assertEqual(st2.stats.textual_file_header_encoding,
                      'EBCDIC')
     # Do once again to enforce EBCDIC.
     writeSEGY(st1, out_file, textual_header_encoding='EBCDIC')
     st3 = readSEGY(out_file)
     # Compare header.
     with open(out_file, 'rb') as f:
         new_header = f.read(3200)
     self.assertTrue(header == new_header)
     os.remove(out_file)
     self.assertEqual(st3.stats.textual_file_header_encoding,
                      'EBCDIC')
     # Enforce ASCII
     writeSEGY(st1, out_file, textual_header_encoding='ASCII')
     st4 = readSEGY(out_file)
     # Compare header. Should not be equal this time.
     with open(out_file, 'rb') as f:
         new_header = f.read(3200)
     self.assertFalse(header == new_header)
     os.remove(out_file)
     self.assertEqual(st4.stats.textual_file_header_encoding,
                      'ASCII')
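Whether the written 3200-byte header is EBCDIC or ASCII can also be checked by hand; a rough heuristic (an illustration, not obspy's detection logic) is that ASCII card images stay below 0x80 while EBCDIC letters and digits live in the upper byte range:

    def guess_textual_header_encoding(header_bytes):
        """Crude guess: mostly 0x20-0x7e bytes -> ASCII, otherwise EBCDIC."""
        ascii_like = sum(0x20 <= b <= 0x7e for b in header_bytes)
        return 'ASCII' if ascii_like > len(header_bytes) // 2 else 'EBCDIC'

    # Example use against the header bytes read in the test above:
    # guess_textual_header_encoding(new_header)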
Example #21
 def test_enforcingTextualHeaderEncodingWhileWriting(self):
     """
     Tests whether or not enforcing the encoding of the textual file header
     while writing works.
     """
     # File ld0042_file_00018.sgy_first_trace has an EBCDIC encoding.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st1 = readSEGY(file)
     # Save the header to compare it later on.
     with open(file, 'rb') as f:
         header = f.read(3200)
     # First write should remain EBCDIC.
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         writeSEGY(st1, out_file)
         st2 = readSEGY(out_file)
         # Compare header.
         with open(out_file, 'rb') as f:
             new_header = f.read(3200)
     self.assertTrue(header == new_header)
     self.assertEqual(st2.stats.textual_file_header_encoding,
                      'EBCDIC')
     # Do once again to enforce EBCDIC.
     writeSEGY(st1, out_file, textual_header_encoding='EBCDIC')
     st3 = readSEGY(out_file)
     # Compare header.
     with open(out_file, 'rb') as f:
         new_header = f.read(3200)
     self.assertTrue(header == new_header)
     os.remove(out_file)
     self.assertEqual(st3.stats.textual_file_header_encoding,
                      'EBCDIC')
     # Enforce ASCII
     writeSEGY(st1, out_file, textual_header_encoding='ASCII')
     st4 = readSEGY(out_file)
     # Compare header. Should not be equal this time.
     with open(out_file, 'rb') as f:
         new_header = f.read(3200)
     self.assertFalse(header == new_header)
     os.remove(out_file)
     self.assertEqual(st4.stats.textual_file_header_encoding,
                      'ASCII')
Example #22
 def test_issue377(self):
     """
     Tests that readSEGY() and stream.write() handle negative trace
     header values.
     """
     filename = os.path.join(self.path, 'one_trace_year_11.sgy')
     st = readSEGY(filename)
     st[0].stats.segy.trace_header['source_coordinate_x'] = -1
     outfile = NamedTemporaryFile().name
     st.write(outfile, format='SEGY')
     os.remove(outfile)
Example #23
 def test_issue377(self):
     """
     Tests that readSEGY() and stream.write() handle negative trace
     header values.
     """
     filename = os.path.join(self.path, "one_trace_year_11.sgy")
     st = readSEGY(filename)
     st[0].stats.segy.trace_header["source_coordinate_x"] = -1
     outfile = NamedTemporaryFile().name
     st.write(outfile, format="SEGY")
     os.remove(outfile)
Example #24
 def test_issue377(self):
     """
     Tests that readSEGY() and stream.write() handle negative trace
     header values.
     """
     filename = os.path.join(self.path, 'one_trace_year_11.sgy')
     st = readSEGY(filename)
     st[0].stats.segy.trace_header['source_coordinate_x'] = -1
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         st.write(outfile, format='SEGY')
Example #25
 def test_invalidDataEncodingRaises(self):
     """
     Using an invalid data encoding raises an error.
     """
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         self.assertRaises(SEGYCoreWritingError, writeSEGY, st, out_file,
                           data_encoding=0)
         self.assertRaises(SEGYCoreWritingError, writeSEGY, st, out_file,
                           data_encoding='')
Example #26
 def test_invalidDataEncodingRaises(self):
     """
     Using an invalid data encoding raises an error.
     """
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         self.assertRaises(SEGYCoreWritingError, writeSEGY, st, out_file,
                           data_encoding=0)
         self.assertRaises(SEGYCoreWritingError, writeSEGY, st, out_file,
                           data_encoding='')
Example #27
 def test_writingStarttimeTimestamp0(self):
     """
     If the starttime of the Trace is UTCDateTime(0) it will be interpreted
     as a missing starttime and not be written. Test that this holds true.
     """
     file = os.path.join(self.path, '1.sgy_first_trace')
     # This file has a set date!
     with open(file, 'rb') as f:
         f.seek(3600 + 156, 0)
         date_time = f.read(10)
     year, julday, hour, minute, second = unpack('>5h', date_time)
     self.assertEqual([
         year == 2005, julday == 353, hour == 15, minute == 7, second == 54
     ], 5 * [True])
     # Read and set zero time.
     segy = readSEGY(file)
     segy[0].stats.starttime = UTCDateTime(0)
     outfile = NamedTemporaryFile().name
     writeSEGY(segy, outfile)
     # Check the new date.
     with open(outfile, 'rb') as f:
         f.seek(3600 + 156, 0)
         date_time = f.read(10)
     os.remove(outfile)
     year, julday, hour, minute, second = unpack('>5h', date_time)
     self.assertEqual(
         [year == 0, julday == 0, hour == 0, minute == 0, second == 0],
         5 * [True])
     # The same for SU.
     file = os.path.join(self.path, '1.su_first_trace')
     # This file has a set date!
     with open(file, 'rb') as f:
         f.seek(156, 0)
         date_time = f.read(10)
     year, julday, hour, minute, second = unpack('<5h', date_time)
     self.assertEqual([
         year == 2005, julday == 353, hour == 15, minute == 7, second == 54
     ], 5 * [True])
     # Read and set zero time.
     su = readSU(file)
     su[0].stats.starttime = UTCDateTime(0)
     outfile = NamedTemporaryFile().name
     writeSU(su, outfile)
     # Check the new date.
     with open(outfile, 'rb') as f:
         f.seek(156, 0)
         date_time = f.read(10)
     os.remove(outfile)
     year, julday, hour, minute, second = unpack('<5h', date_time)
     self.assertEqual(
         [year == 0, julday == 0, hour == 0, minute == 0, second == 0],
         5 * [True])
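The magic offsets above are the SEG Y trace header layout: the year/day-of-year/hour/minute/second block starts 156 bytes into the trace header, which itself starts 3600 bytes into a SEG Y file and at byte 0 of an SU file. A small helper (an illustration, not part of obspy) makes that explicit:

    from struct import unpack

    def read_trace_header_date(filename, header_offset, byteorder='>'):
        """Return (year, julday, hour, minute, second) from the trace header
        starting at ``header_offset`` (3600 for the first SEG Y trace, 0 for
        SU); ``byteorder`` is '>' or '<'."""
        with open(filename, 'rb') as f:
            f.seek(header_offset + 156)
            raw = f.read(10)
        return unpack(byteorder + '5h', raw)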
Example #28
 def test_enforcingEndiannessWhileWriting(self):
     """
     Tests whether or not the enforcing of the endianness while writing
     works.
     """
     # File ld0042_file_00018.sgy_first_trace is in big endian.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st1 = readSEGY(file)
     # First write should be big endian.
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         writeSEGY(st1, out_file)
         st2 = readSEGY(out_file)
         self.assertEqual(st2.stats.endian, '>')
         # Do once again to enforce big endian.
         writeSEGY(st1, out_file, byteorder='>')
         st3 = readSEGY(out_file)
         self.assertEqual(st3.stats.endian, '>')
         # Enforce little endian.
         writeSEGY(st1, out_file, byteorder='<')
         st4 = readSEGY(out_file)
         self.assertEqual(st4.stats.endian, '<')
Example #29
 def test_enforcingEndiannessWhileWriting(self):
     """
     Tests whether or not the enforcing of the endianness while writing
     works.
     """
     # File ld0042_file_00018.sgy_first_trace is in big endian.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st1 = readSEGY(file)
     # First write should be big endian.
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         writeSEGY(st1, out_file)
         st2 = readSEGY(out_file)
         self.assertEqual(st2.stats.endian, '>')
         # Do once again to enforce big endian.
         writeSEGY(st1, out_file, byteorder='>')
         st3 = readSEGY(out_file)
         self.assertEqual(st3.stats.endian, '>')
         # Enforce little endian.
         writeSEGY(st1, out_file, byteorder='<')
         st4 = readSEGY(out_file)
         self.assertEqual(st4.stats.endian, '<')
Example #30
 def test_readHeadOnly(self):
     """
     Tests headonly flag on readSEGY and readSU functions.
     """
     # readSEGY
     file = os.path.join(self.path, '1.sgy_first_trace')
     st = readSEGY(file, headonly=True)
     self.assertEqual(st[0].stats.npts, 8000)
     self.assertEqual(len(st[0].data), 0)
     # readSU
     file = os.path.join(self.path, '1.su_first_trace')
     st = readSU(file, headonly=True)
     self.assertEqual(st[0].stats.npts, 8000)
     self.assertEqual(len(st[0].data), 0)
Example #31
 def test_readHeadOnly(self):
     """
     Tests headonly flag on readSEGY and readSU functions.
     """
     # readSEGY
     file = os.path.join(self.path, '1.sgy_first_trace')
     st = readSEGY(file, headonly=True)
     self.assertEqual(st[0].stats.npts, 8000)
     self.assertEqual(len(st[0].data), 0)
     # readSU
     file = os.path.join(self.path, '1.su_first_trace')
     st = readSU(file, headonly=True)
     self.assertEqual(st[0].stats.npts, 8000)
     self.assertEqual(len(st[0].data), 0)
Example #32
 def test_writingModifiedDate(self):
     """
     Tests if the date in Trace.stats.starttime is correctly written in SU
     and SEGY files.
     """
     # Define new date!
     new_date = UTCDateTime(2010, 7, 7, 2, 2, 2)
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         # Test for SEGY.
         file = os.path.join(self.path, 'example.y_first_trace')
         segy = readSEGY(file)
         segy[0].stats.starttime = new_date
         writeSEGY(segy, outfile)
         segy_new = readSEGY(outfile)
         self.assertEqual(new_date, segy_new[0].stats.starttime)
         # Test for SU.
         file = os.path.join(self.path, '1.su_first_trace')
         su = readSU(file)
         su[0].stats.starttime = new_date
         writeSU(su, outfile)
         su_new = readSU(outfile)
     self.assertEqual(new_date, su_new[0].stats.starttime)
Example #33
 def test_readingDate(self):
     """
     Reads one file with a set date. The date has been read with SeisView 2
     by the DMNG.
     """
     # Date as read by SeisView 2.
     date = UTCDateTime(year=2005, julday=353, hour=15, minute=7, second=54)
     file = os.path.join(self.path, '1.sgy_first_trace')
     segy = readSEGY(file)
     self.assertEqual(date, segy[0].stats.starttime)
     # The same with the Seismic Unix file.
     file = os.path.join(self.path, '1.su_first_trace')
     su = readSU(file)
     self.assertEqual(date, su[0].stats.starttime)
Example #34
 def test_settingDeltaandSamplingRateinStats(self):
     """
     Just checks if the delta and sampling rate attributes are correctly
     set.
     Testing the delta value is enough because the stats attribute takes
     care that delta/sampling rate always match.
     """
     file = os.path.join(self.path, '1.sgy_first_trace')
     segy = readSEGY(file)
     self.assertEqual(segy[0].stats.delta, 250E-6)
     # The same with the Seismic Unix file.
     file = os.path.join(self.path, '1.su_first_trace')
     su = readSU(file)
     self.assertEqual(su[0].stats.delta, 250E-6)
Example #35
 def test_writingModifiedDate(self):
     """
     Tests if the date in Trace.stats.starttime is correctly written in SU
     and SEGY files.
     """
     # Define new date!
     new_date = UTCDateTime(2010, 7, 7, 2, 2, 2)
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         # Test for SEGY.
         file = os.path.join(self.path, 'example.y_first_trace')
         segy = readSEGY(file)
         segy[0].stats.starttime = new_date
         writeSEGY(segy, outfile)
         segy_new = readSEGY(outfile)
         self.assertEqual(new_date, segy_new[0].stats.starttime)
         # Test for SU.
         file = os.path.join(self.path, '1.su_first_trace')
         su = readSU(file)
         su[0].stats.starttime = new_date
         writeSU(su, outfile)
         su_new = readSU(outfile)
     self.assertEqual(new_date, su_new[0].stats.starttime)
Example #36
 def test_settingDeltaandSamplingRateinStats(self):
     """
     Just checks if the delta and sampling rate attributes are correctly
     set.
     Testing the delta value is enough because the stats attribute takes
     care that delta/sampling rate always match.
     """
     file = os.path.join(self.path, '1.sgy_first_trace')
     segy = readSEGY(file)
     self.assertEqual(segy[0].stats.delta, 250E-6)
     # The same with the Seismic Unix file.
     file = os.path.join(self.path, '1.su_first_trace')
     su = readSU(file)
     self.assertEqual(su[0].stats.delta, 250E-6)
Example #37
 def test_readingDate(self):
     """
     Reads one file with a set date. The date has been read with SeisView 2
     by the DMNG.
     """
     # Date as read by SeisView 2.
     date = UTCDateTime(year=2005, julday=353, hour=15, minute=7, second=54)
     file = os.path.join(self.path, '1.sgy_first_trace')
     segy = readSEGY(file)
     self.assertEqual(date, segy[0].stats.starttime)
     # The same with the Seismic Unix file.
     file = os.path.join(self.path, '1.su_first_trace')
     su = readSU(file)
     self.assertEqual(date, su[0].stats.starttime)
Example #38
 def test_notMatchingDataEncodingAndDtypeRaises(self):
     """
     obspy.segy does not automatically convert to the corresponding dtype.
     """
     encodings = [1, 2, 3, 5]
     # The file uses IBM data encoding.
     file = os.path.join(self.path, "ld0042_file_00018.sgy_first_trace")
     st = readSEGY(file)
     # Use float64 as the wrong encoding in every case.
     st[0].data = np.require(st[0].data, "float64")
     out_file = NamedTemporaryFile().name
     # Loop over all encodings.
     for data_encoding in encodings:
         self.assertRaises(SEGYCoreWritingError, writeSEGY, st, out_file, data_encoding=data_encoding)
     os.remove(out_file)
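The failure tested here is purely a dtype mismatch: the array has to be cast to the dtype matching the chosen encoding before writing (float32 for encodings 1 and 5, int32 for 2, int16 for 3). A minimal sketch of the required cast:

    import numpy as np

    data64 = np.zeros(10, dtype='float64')
    data32 = np.require(data64, 'float32')  # what encodings 1 and 5 expect
    assert data32.dtype == np.float32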
Example #39
 def test_writingStarttimeTimestamp0(self):
     """
     If the starttime of the Trace is UTCDateTime(0) it will be interpreted
     as a missing starttime and not be written. Test that this holds true.
     """
     file = os.path.join(self.path, '1.sgy_first_trace')
     # This file has a set date!
     with open(file, 'rb') as f:
         f.seek(3600 + 156, 0)
         date_time = f.read(10)
     year, julday, hour, minute, second = unpack('>5h', date_time)
     self.assertEqual([year == 2005, julday == 353, hour == 15, minute == 7,
                       second == 54], 5 * [True])
     # Read and set zero time.
     segy = readSEGY(file)
     segy[0].stats.starttime = UTCDateTime(0)
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         writeSEGY(segy, outfile)
         # Check the new date.
         with open(outfile, 'rb') as f:
             f.seek(3600 + 156, 0)
             date_time = f.read(10)
     year, julday, hour, minute, second = unpack('>5h', date_time)
     self.assertEqual([year == 0, julday == 0, hour == 0, minute == 0,
                       second == 0], 5 * [True])
     # The same for SU.
     file = os.path.join(self.path, '1.su_first_trace')
     # This file has a set date!
     with open(file, 'rb') as f:
         f.seek(156, 0)
         date_time = f.read(10)
     year, julday, hour, minute, second = unpack('<5h', date_time)
     self.assertEqual([year == 2005, julday == 353, hour == 15, minute == 7,
                       second == 54], 5 * [True])
     # Read and set zero time.
     su = readSU(file)
     su[0].stats.starttime = UTCDateTime(0)
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         writeSU(su, outfile)
         # Check the new date.
         with open(outfile, 'rb') as f:
             f.seek(156, 0)
             date_time = f.read(10)
     year, julday, hour, minute, second = unpack('<5h', date_time)
     self.assertEqual([year == 0, julday == 0, hour == 0, minute == 0,
                       second == 0], 5 * [True])
Example #40
 def test_notMatchingDataEncodingAndDtypeRaises(self):
     """
     obspy.segy does not automatically convert to the corresponding dtype.
     """
     encodings = [1, 2, 3, 5]
     # The file uses IBM data encoding.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     # Use float64 as the wrong encoding in every case.
     st[0].data = np.require(st[0].data, 'float64')
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         # Loop over all encodings.
         for data_encoding in encodings:
             self.assertRaises(SEGYCoreWritingError, writeSEGY, st,
                               out_file, data_encoding=data_encoding)
Example #41
 def test_readingUsingCore(self):
     """
     This test checks whether or not all necessary information is read
     during reading with core. It actually just assumes the internal
     SEGYFile object, which is thoroughly tested in
     obspy.segy.tests.test_segy, is correct and compares all values to it.
     This seems to be the easiest way to test everything.
     """
     for file, _ in self.files.items():
         file = os.path.join(self.path, file)
         # Read the file with the internal SEGY representation.
         segy_file = readSEGYInternal(file)
         # Read again using core.
         st = readSEGY(file)
         # They all should have length one because all additional traces
         # have been removed.
         self.assertEqual(len(st), 1)
         # Assert the data is the same.
         np.testing.assert_array_equal(segy_file.traces[0].data, st[0].data)
         # Textual header.
         self.assertEqual(segy_file.textual_file_header,
                          st.stats.textual_file_header)
         # Textual_header_encoding.
         self.assertEqual(segy_file.textual_header_encoding,
                          st.stats.textual_file_header_encoding)
         # Endianness.
         self.assertEqual(segy_file.endian, st.stats.endian)
         # Data encoding.
         self.assertEqual(segy_file.data_encoding,
                          st.stats.data_encoding)
         # Test the file and trace binary headers.
         for key, value in \
                 segy_file.binary_file_header.__dict__.items():
             self.assertEqual(getattr(st.stats.binary_file_header,
                              key), value)
         for key, value in \
                 segy_file.traces[0].header.__dict__.items():
             self.assertEqual(getattr(st[0].stats.segy.trace_header, key),
                              value)
Example #42
    def update(self, transect, flat=False):
        """
        Updates the container data to a profile that intersects the
        transect line.

        Returns nothing. Sets attributes as a side effect.

        Args:
            transect (LineString): A transect line.
            flat (Bool): Reads data into a flat list instead of
                         sorting by files.
        """
        Notice.info("Updating " + self.__class__.__name__)

        self.reset_data()

        # Preprocessing
        prepared = prep(transect.buffer(self.settings['buffer']))

        # Get intersecting points
        points = filter(prepared.contains, self.lookup.keys())

        # Lookup for grouping traces into segy files
        count = 0
        file_lookup = {}
        for point in points:
            meta = self.lookup[point]
            count += 1
            f = meta["segyfile"]
            if f in file_lookup:
                proj_d = transect.project(point)
                if proj_d:
                    file_lookup[f]["pos"].append(proj_d)
                    file_lookup[f]["trace"].append(meta["trace"])
                    file_lookup[f]["point"].append(point)
            else:
                file_lookup[f] = {}
                file_lookup[f]["trace"] = [meta["trace"]]
                file_lookup[f]["pos"] = [transect.project(point)]
                file_lookup[f]["point"] = [point]

        # Read in the chunks from the segy file
        self.files = []
        for segyfile in file_lookup.keys():
            self.files.append(os.path.basename(segyfile))
            segy = readSEGY(segyfile, unpack_trace_headers=True)
            traces = file_lookup[segyfile]["trace"]
            coords = file_lookup[segyfile]["pos"]
            points = file_lookup[segyfile]["point"]

            # Get the sort order.
            idx = sorted(range(len(traces)), key=lambda k: traces[k])
            idx = filter(None, idx)

            coords = [coords[i] for i in idx]
            data = [segy.traces[traces[i]] for i in idx]

            if flat:
                self.data += data
                self.coords += coords
            else:
                self.data.append(data)
                self.coords.append(coords)
Example #43
def read_model(fname):
    """ Reads a model in segy format and returns it as an array."""

    data = segy.readSEGY(fname)

    return np.array([tr.data for tr in data.traces])
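A quick way to look at the returned array is an image plot; matplotlib here is just an illustration and 'model.segy' a hypothetical file name (read_model assumes something like `from obspy import segy` for the `segy.readSEGY` call):

    import matplotlib.pyplot as plt

    model = read_model('model.segy')    # shape: (ntraces, nsamples)
    plt.imshow(model.T, aspect='auto')  # traces along x, samples along y
    plt.xlabel('trace')
    plt.ylabel('sample')
    plt.show()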
Example #44
def read_model(fname):
    """ Reads a model in segy format and returns it as an array."""

    data = segy.readSEGY(fname)

    return np.array([tr.data for tr in data.traces])
Example #45
    def update(self, transect, flat=False):
        """
        Updates the container data to a profile that intersects the
        transect line.

        Returns nothing. Sets attributes as a side effect.

        Args:
            transect (LineString): A transect line.
            flat (Bool): Reads data into a flat list instead of
                         sorting by files.
        """
        Notice.info("Updating " + self.__class__.__name__)

        self.reset_data()

        # Preprocessing
        prepared = prep(transect.buffer(self.settings['buffer']))

        # Get intersecting points
        points = filter(prepared.contains, self.lookup.keys())

        # Lookup for grouping traces into segy files
        count = 0
        file_lookup = {}
        for point in points:
            meta = self.lookup[point]
            count += 1
            f = meta["segyfile"]
            if f in file_lookup:
                proj_d = transect.project(point)
                if proj_d:
                    file_lookup[f]["pos"].append(proj_d)
                    file_lookup[f]["trace"].append(meta["trace"])
                    file_lookup[f]["point"].append(point)
            else:
                file_lookup[f] = {}
                file_lookup[f]["trace"] = [meta["trace"]]
                file_lookup[f]["pos"] = [transect.project(point)]
                file_lookup[f]["point"] = [point]

        # Read in the chunks from the segy file
        self.files = []
        for segyfile in file_lookup.keys():
            self.files.append(os.path.basename(segyfile))
            segy = readSEGY(segyfile, unpack_trace_headers=True)
            traces = file_lookup[segyfile]["trace"]
            coords = file_lookup[segyfile]["pos"]
            points = file_lookup[segyfile]["point"]

            # Get the sort order.
            idx = sorted(range(len(traces)), key=lambda k: traces[k])
            idx = filter(None, idx)

            coords = [coords[i] for i in idx]
            data = [segy.traces[traces[i]] for i in idx]

            if flat:
                self.data += data
                self.coords += coords
            else:
                self.data.append(data)
                self.coords.append(coords)