Example #1
    def test_detrend(self):
        """
        Test detrend method of trace
        """
        t = np.arange(10)
        data = 0.1 * t + 1.
        tr = Trace(data=data.copy())

        tr.detrend(type='simple')
        np.testing.assert_array_almost_equal(tr.data, np.zeros(10))

        tr.data = data.copy()
        tr.detrend(type='linear')
        np.testing.assert_array_almost_equal(tr.data, np.zeros(10))

        data = np.zeros(10)
        data[3:7] = 1.

        tr.data = data.copy()
        tr.detrend(type='simple')
        np.testing.assert_almost_equal(tr.data[0], 0.)
        np.testing.assert_almost_equal(tr.data[-1], 0.)

        tr.data = data.copy()
        tr.detrend(type='linear')
        np.testing.assert_almost_equal(tr.data[0], -0.4)
        np.testing.assert_almost_equal(tr.data[-1], -0.4)
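The linear-detrend assertions above can be checked by hand: for the boxcar signal the least-squares line is flat at the signal mean (0.4), so removing it leaves both endpoints at -0.4, while 'simple' only removes the line through the first and last samples (both zero) and leaves the endpoints unchanged. A minimal numpy-only sketch of that check:

import numpy as np

t = np.arange(10)
data = np.zeros(10)
data[3:7] = 1.

# Least-squares line, i.e. what detrend(type='linear') removes.
slope, intercept = np.polyfit(t, data, 1)
print(slope, intercept)            # ~0.0 and 0.4: the best-fit line is flat at the mean

residual = data - (slope * t + intercept)
print(residual[0], residual[-1])   # both ~-0.4, matching the assertions above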
Example #3
def Bandpass(datapath):
    # for j in tqdm(range(10),desc = "processing"):
    filepath = datapath + "/predict/syn/Z/"
    overpath = datapath + "/predict/syn/Z/"
    os.chdir(filepath)

    for i in range(300):
        st = obspy.read(filepath + str(i) + '.sac')
        tr = st[0]
        tr.filter('bandpass', freqmin=8, freqmax=15, corners=4, zerophase=True)
        # tr.filter('highpass', freq=8, corners=4, zerophase=True)
        tr = (tr.data) / np.max(abs(tr.data))
        sacfile = Trace()
        sacfile.data = tr[:]

        sac = SACTrace.from_obspy_trace(sacfile)
        sac_data = sac.data
        sac.stla = 35
        sac.stlo = 110 + (80 + 72 * 500 + i * 25) / 111000

        sac.delta = 0.0006
        sac.evla = 35
        sac.evlo = 110 + (6568 + 500 * 72) / 111000
        sac.evdp = 0.05
        sac.write(overpath + str(72 * 520 + i) + ".sac")
Example #4
def normalization(datapath):
    filepath = datapath + '/predict/syn/Z/'
    for i in tqdm(range(300), desc='processing'):
        st = read(filepath + str(i) + '.sac')
        tr = (st[0].data) / np.max(abs(st[0].data))
        sacfile = Trace()
        sacfile.data = tr[:]
        sacfile.write(filepath + str(i) + ".sac", format="SAC")
Example #5
    def test_SacInstCorrection(self):
        # SAC recommends to taper the transfer function if a pure
        # deconvolution is done instead of simulating a different
        # instrument. This test checks the difference between the
        # result from removing the instrument response using SAC or
        # ObsPy. Visual inspection shows that the traces are pretty
        # much identical but differences remain (rms ~ 0.042). Haven't
        # found the cause for those, yet. One possible reason is the
        # floating point arithmetic of SAC vs. the double precision
        # arithmetic of Python. However differences still seem to be
        # too big for that.
        pzf = os.path.join(self.path, 'SAC_PZs_KARC_BHZ')
        sacf = os.path.join(self.path, 'KARC.LHZ.SAC.asc.gz')
        testsacf = os.path.join(self.path, 'KARC_corrected.sac.asc.gz')
        plow = 160.
        phigh = 4.
        fl1 = 1.0 / (plow + 0.0625 * plow)
        fl2 = 1.0 / plow
        fl3 = 1.0 / phigh
        fl4 = 1.0 / (phigh - 0.25 * phigh)
        #Uncomment the following to run the sac-commands
        #that created the testing file
        #if 1:
        #    import subprocess as sp
        #    p = sp.Popen('sac',shell=True,stdin=sp.PIPE)
        #    cd1 = p.stdin
        #    print >>cd1, "r %s"%sacf
        #    print >>cd1, "rmean"
        #    print >>cd1, "rtrend"
        #    print >>cd1, "taper type cosine width 0.03"
        #    print >>cd1, "transfer from polezero subtype %s to none \
        #    freqlimits %f %f %f %f" % (pzf, fl1, fl2, fl3, fl4)
        #    print >>cd1, "w over ./data/KARC_corrected.sac"
        #    print >>cd1, "quit"
        #    cd1.close()
        #    p.wait()

        stats = {'network': 'KA', 'delta': 0.99999988079072466,
                 'station': 'KARC', 'location': 'S1',
                 'starttime': UTCDateTime(2001, 2, 13, 0, 0, 0, 993700),
                 'calib': 1.00868e+09, 'channel': 'BHZ'}
        tr = Trace(np.loadtxt(sacf), stats)

        attach_paz(tr, pzf, tovel=False)
        tr.data = seisSim(tr.data, tr.stats.sampling_rate,
                          paz_remove=tr.stats.paz, remove_sensitivity=False,
                          pre_filt=(fl1, fl2, fl3, fl4))

        data = np.loadtxt(testsacf)

        # import matplotlib.pyplot as plt
        # plt.plot(tr.data)
        # plt.plot(data)
        # plt.show()
        rms = np.sqrt(np.sum((tr.data - data) ** 2) / \
                      np.sum(tr.data ** 2))
        self.assertTrue(rms < 0.0421)
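The four pre_filt corners above come straight from the chosen period band: the pass band is [1/plow, 1/phigh] and fl1/fl4 extend it by the taper margins used when tapering the transfer function. A quick arithmetic check (no ObsPy required):

plow, phigh = 160., 4.
fl1 = 1.0 / (plow + 0.0625 * plow)   # ~0.00588 Hz, lower taper corner
fl2 = 1.0 / plow                     # 0.00625 Hz, lower pass-band corner
fl3 = 1.0 / phigh                    # 0.25 Hz, upper pass-band corner
fl4 = 1.0 / (phigh - 0.25 * phigh)   # ~0.333 Hz, upper taper corner
print(fl1, fl2, fl3, fl4)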
Example #6
def compute_DWT(isource, j):
    """Read the results of the simulation in SU file
	and compute the wavelet transform

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

    namedir1 = 'Source_' + str(isource + 1)
    os.chdir(namedir1)

    filename_d = '../../Data/data_shot' + str(isource + 1) + '.su'
    filename_s = 'OUTPUT_FILES/Up_file_single.su'
    stream_d = read(filename_d, format='SU')
    stream_s = read(filename_s, format='SU')

    stream_d_DWT = Stream()
    stream_s_DWT = Stream()
    for irec in range(0, nrec):
        trace_d = stream_d[irec].copy()
        trace_s = stream_s[irec].copy()
        # Interpolation: We need the same sampling rate to carry out the DWT
        trace_d.interpolate(sampling_rate=1.0 / dt_ref,
                            starttime=trace_d.stats.starttime,
                            npts=nt_ref)
        trace_s.interpolate(sampling_rate=1.0 / dt_ref,
                            starttime=trace_s.stats.starttime,
                            npts=nt_ref)
        # Discrete Wavelet Transform
        data = trace_d.data
        synthetics = trace_s.data
        (data_DWT, NA_d) = WT(data, nt_ref, j)
        (synthetics_DWT, NA_s) = WT(synthetics, nt_ref, j)
        trace_d_DWT = Trace(data=data_DWT, header=trace_d.stats)
        trace_s_DWT = Trace(data=synthetics_DWT, header=trace_s.stats)
        trace_d_DWT.data = numpy.require(trace_d_DWT.data, dtype=numpy.float32)
        trace_s_DWT.data = numpy.require(trace_s_DWT.data, dtype=numpy.float32)
        stream_d_DWT.append(trace_d_DWT)
        stream_s_DWT.append(trace_s_DWT)
    stream_d_DWT.write('OUTPUT_FILES/data_DWT.su', format='SU', byteorder='<')
    stream_s_DWT.write('OUTPUT_FILES/synthetics_DWT.su',
                       format='SU',
                       byteorder='<')
    os.chdir('..')
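WT() and the globals nrec, dt_ref and nt_ref are defined elsewhere in this project and are not shown here. As a rough, hypothetical stand-in for the wavelet-transform step (assuming the PyWavelets package; the real WT() may differ), one could write:

import numpy as np
import pywt

def WT(data, nt, level):
    # Hypothetical sketch: multilevel discrete wavelet transform with a
    # Daubechies wavelet; returns the concatenated coefficients and the
    # per-level lengths (playing the role of NA in the example above).
    coeffs = pywt.wavedec(np.asarray(data, dtype=np.float64), 'db4', level=level)
    return np.concatenate(coeffs), [len(c) for c in coeffs]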
Example #7
 def test_writeSACXYWithMinimumStats(self):
     """
     Write SACXY with a minimal stats header, nothing inherited from a SAC file
     """
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, 'SACXY')
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEquals(st[0].stats.delta, 0.01)
     self.assertEquals(st[0].stats.sampling_rate, 100.0)
Example #8
 def test_writeSACXYWithMinimumStats(self):
     """
     Write SACXY with a minimal stats header, nothing inherited from a SAC file
     """
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, "SACXY")
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEquals(st[0].stats.delta, 0.01)
     self.assertEquals(st[0].stats.sampling_rate, 100.0)
Example #9
def compute_DWT(isource, j):
	"""Read the results of the simulation in SU file
	and compute the wavelet transform

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

	namedir1 = 'Source_' + str(isource + 1)
	os.chdir(namedir1)

	filename_d = '../../Data/data_shot' + str(isource + 1) + '.su'
	filename_s = 'OUTPUT_FILES/Up_file_single.su'
	stream_d = read(filename_d, format='SU')
	stream_s = read(filename_s, format='SU')

	stream_d_DWT = Stream()
	stream_s_DWT = Stream()
	for irec in range(0, nrec):
		trace_d = stream_d[irec].copy()
		trace_s = stream_s[irec].copy()
		# Interpolation: We need the same sampling rate to carry out the DWT
		trace_d.interpolate(sampling_rate=1.0 / dt_ref, starttime=trace_d.stats.starttime, npts=nt_ref)
		trace_s.interpolate(sampling_rate=1.0 / dt_ref, starttime=trace_s.stats.starttime, npts=nt_ref)
		# Discrete Wavelet Transform
		data = trace_d.data
		synthetics = trace_s.data
		(data_DWT, NA_d) = WT(data, nt_ref, j)
		(synthetics_DWT, NA_s) = WT(synthetics, nt_ref, j)
		trace_d_DWT = Trace(data=data_DWT, header=trace_d.stats)
		trace_s_DWT = Trace(data=synthetics_DWT, header=trace_s.stats)
		trace_d_DWT.data = numpy.require(trace_d_DWT.data, dtype=numpy.float32)
		trace_s_DWT.data = numpy.require(trace_s_DWT.data, dtype=numpy.float32)
		stream_d_DWT.append(trace_d_DWT)
		stream_s_DWT.append(trace_s_DWT)
	stream_d_DWT.write('OUTPUT_FILES/data_DWT.su', format='SU', byteorder='<')
	stream_s_DWT.write('OUTPUT_FILES/synthetics_DWT.su', format='SU', byteorder='<')
	os.chdir('..')
Example #10
 def test_issue156(self):
     """
     Test case for issue #156.
     """
     #1
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, 'SAC')
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEquals(st[0].stats.delta, 0.01)
     self.assertEquals(st[0].stats.sampling_rate, 100.0)
     #2
     tr = Trace()
     tr.stats.delta = 0.005
     tr.data = np.arange(0, 2000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, 'SAC')
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEquals(st[0].stats.delta, 0.005)
     self.assertEquals(st[0].stats.sampling_rate, 200.0)
Example #11
 def test_issue156(self):
     """
     Test case for issue #156.
     """
     # 1
     tr = Trace()
     tr.stats.delta = 0.01
     tr.data = np.arange(0, 3000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, "SAC")
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEquals(st[0].stats.delta, 0.01)
     self.assertEquals(st[0].stats.sampling_rate, 100.0)
     # 2
     tr = Trace()
     tr.stats.delta = 0.005
     tr.data = np.arange(0, 2000)
     sac_file = NamedTemporaryFile().name
     tr.write(sac_file, "SAC")
     st = read(sac_file)
     os.remove(sac_file)
     self.assertEquals(st[0].stats.delta, 0.005)
     self.assertEquals(st[0].stats.sampling_rate, 200.0)
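The sampling_rate assertions hold because ObsPy's Stats object keeps delta and sampling_rate coupled: setting one updates the other, which is what the round trip through the SAC file relies on. A minimal sketch:

from obspy import Trace

tr = Trace()
tr.stats.delta = 0.005
print(tr.stats.sampling_rate)   # 200.0
tr.stats.sampling_rate = 100.0
print(tr.stats.delta)           # 0.01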
Example #12
 def test_times(self):
     """
     Test if the correct times array is returned for normal traces and
     traces with gaps.
     """
     tr = Trace(data=np.ones(100))
     tr.stats.sampling_rate = 20
     start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
     tr.stats.starttime = start
     tm = tr.times()
     self.assertAlmostEquals(tm[-1], tr.stats.endtime - tr.stats.starttime)
     tr.data = np.ma.ones(100)
     tr.data[30:40] = np.ma.masked
     tm = tr.times()
     self.assertTrue(np.alltrue(tr.data.mask == tm.mask))
Example #14
def time_difference(isource, j):
    """Compute the time difference between data and synthetics

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

    namedir1 = 'Source_' + str(isource + 1)
    os.chdir(namedir1)

    filename_d = 'OUTPUT_FILES/data_process.su'
    filename_s = 'OUTPUT_FILES/synthetics_process.su'
    filename_i = 'OUTPUT_FILES/Up_file_single.su'
    stream_d = read(filename_d, format='SU', byteorder='<')
    stream_s = read(filename_s, format='SU', byteorder='<')
    stream_i = read(filename_i, format='SU')

    misfit = 0.0
    stream_adj = Stream()
    for irec in range(0, nrec):
        adj = numpy.zeros(nt_s)
        trace_i = stream_i[irec].copy()
        if irec >= rstart - 1 and irec <= rend - 1:
            trace_d = stream_d[irec].copy()
            trace_s = stream_s[irec].copy()
            if trace_d.data.size != trace_s.data.size:
                raise ValueError(
                    "Data and synthetic signals should have the same length")
            nstep = trace_s.data.size
            adj_temp = numpy.zeros(nt_ref)
            starttime = tstart[j - 1] + irec * 25.0 * sstart[j - 1]
            istart = int(starttime / dt_ref)
            for it in range(0, nstep):
                misfit += 0.5 * numpy.power(
                    f * trace_s.data[it] - trace_d.data[it], 2.0)
                adj_temp[istart + it] = f * trace_s.data[it] - trace_d.data[it]
            trace_adj = Trace(data=adj_temp, header=trace_s.stats)
            trace_adj.interpolate(sampling_rate=1.0 / dt_s,
                                  starttime=trace_adj.stats.starttime,
                                  npts=nt_s)
        else:
            trace_adj = Trace(data=adj, header=trace_i.stats)
        trace_adj.data = numpy.require(trace_adj.data, dtype=numpy.float32)
        stream_adj.append(trace_adj)
    stream_adj.write('SEM/Up_file_single.su.adj', format='SU')
    os.chdir('..')

    return misfit
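The sample-by-sample loop above accumulates a plain L2 waveform misfit, 0.5 * sum((f*s - d)^2), and stores the residual as the adjoint source. A vectorised sketch of the same computation, with dummy arrays standing in for f * trace_s.data and trace_d.data:

import numpy as np

f = 1.0
syn = np.random.randn(100)      # stands in for trace_s.data
obs = np.random.randn(100)      # stands in for trace_d.data
residual = f * syn - obs
misfit = 0.5 * np.sum(residual ** 2)   # equals the summed 0.5 * residual[it]**2
print(misfit)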
Example #15
def time_difference(isource, j):
	"""Compute the time difference between data and synthetics

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

	namedir1 = 'Source_' + str(isource + 1)
	os.chdir(namedir1)

	filename_d = 'OUTPUT_FILES/data_process.su'
	filename_s = 'OUTPUT_FILES/synthetics_process.su'
	filename_i = 'OUTPUT_FILES/Up_file_single.su'
	stream_d = read(filename_d, format='SU', byteorder='<')
	stream_s = read(filename_s, format='SU', byteorder='<')
	stream_i = read(filename_i, format='SU')

	misfit = 0.0
	stream_adj = Stream()
	for irec in range(0, nrec):
		adj = numpy.zeros(nt_s)
		trace_i = stream_i[irec].copy()
		if irec >= rstart - 1 and irec <= rend - 1:
			trace_d = stream_d[irec].copy()
			trace_s = stream_s[irec].copy()
			if trace_d.data.size != trace_s.data.size:
				raise ValueError("Data and synthetic signals should have the same length")
			nstep = trace_s.data.size
			adj_temp = numpy.zeros(nt_ref)
			starttime = tstart[j - 1] + irec * 25.0 * sstart[j - 1]
			istart = int(starttime / dt_ref)
			for it in range(0, nstep):
				misfit += 0.5 * numpy.power(f * trace_s.data[it] - trace_d.data[it], 2.0)
				adj_temp[istart + it] = f * trace_s.data[it] - trace_d.data[it]
			trace_adj = Trace(data=adj_temp, header=trace_s.stats)
			trace_adj.interpolate(sampling_rate=1.0 / dt_s, starttime=trace_adj.stats.starttime, npts=nt_s)
		else:
			trace_adj = Trace(data=adj, header=trace_i.stats)
		trace_adj.data = numpy.require(trace_adj.data, dtype=numpy.float32)
		stream_adj.append(trace_adj)
	stream_adj.write('SEM/Up_file_single.su.adj', format='SU')
	os.chdir('..')

	return misfit
Example #16
def cumsumsq(trace, normalize=True, copy=True):
    """
    Returns the cumulative sum of the squares of the trace's data, `trace.data**2`

    :param trace: the input :class:`obspy.core.Trace`
    :param normalize: boolean (default: True), whether to normalize the data in [0, 1]
    :param copy: boolean (default: True), whether to work on a copy of the trace
        (returning a new Trace) or to modify `trace.data` in place
    :return: a Trace representing the cumulative sum of the squares of `trace.data`
    """
    data = _cumsumsq(trace.data, normalize=normalize)
    if copy:
        trace = Trace(data, header=trace.stats.copy())
    else:
        trace.data = data
    # copied from obspy Trace to keep track of the modifications
    _add_processing_info(trace,
                         cumsumsq.__name__,
                         normalize=normalize,
                         copy=copy)
    return trace
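_cumsumsq and _add_processing_info are private helpers of the surrounding module and are not shown here. A plausible sketch of the cumulative-sum-of-squares part, under the assumption that normalization simply divides by the final value, could look like:

import numpy as np

def _cumsumsq(data, normalize=True):
    # Hypothetical helper: running "energy" of the signal.
    csum = np.cumsum(np.asarray(data, dtype=np.float64) ** 2)
    if normalize and csum[-1] != 0:
        csum /= csum[-1]   # scale into [0, 1]
    return csum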
Example #17
    def _createStream(self, starttime, endtime, sampling_rate):
        """
        Helper method to create a Stream object that can be used for testing
        waveform plotting.

        Takes the time frame of the Stream to be created and a sampling rate.
        Any other header information will have to be adjusted on a case-by-case
        basis. Please remember to use the same sampling rate for all Traces, as
        merging and plotting will not work otherwise.

        To a first approximation, this method creates a single sine curve with
        10 smaller sine curves superimposed on it.

        :return: Stream object
        """
        time_delta = endtime - starttime
        number_of_samples = int(time_delta * sampling_rate) + 1
        # Calculate first sine wave.
        curve = np.linspace(0, 2 * np.pi, int(number_of_samples // 2))
        # Superimpose it with a smaller but shorter wavelength sine wave.
        curve = np.sin(curve) + 0.2 * np.sin(10 * curve)
        # To get a thick curve alternate between two curves.
        data = np.empty(number_of_samples)
        # Check if even number and adjust if necessary.
        if number_of_samples % 2 == 0:
            data[0::2] = curve
            data[1::2] = curve + 0.2
        else:
            data[-1] = 0.0
            data[0:-1][0::2] = curve
            data[0:-1][1::2] = curve + 0.2
        tr = Trace()
        tr.stats.starttime = starttime
        tr.stats.sampling_rate = float(sampling_rate)
        # Fill dummy header.
        tr.stats.network = 'BW'
        tr.stats.station = 'OBSPY'
        tr.stats.channel = 'TEST'
        tr.data = data
        return Stream(traces=[tr])
Example #19
def add_corr(db,station1, station2, filterid, date, time, duration, components, CF, sampling_rate,day=False,ncorr=0):
    output_folder = get_config(db, 'output_folder')
    export_format = get_config(db,'export_format')
    sac, mseed = False, False
    if export_format == "BOTH":
        mseed = True
        sac = True
    elif export_format == "SAC":
        mseed = False
        sac = True
    elif export_format == "MSEED":
        mseed = True
        sac = False
    
    if day:
        path = os.path.join("STACKS","%02i"%filterid,"001_DAYS",components,"%s_%s"%(station1,station2),str(date))
        pair = "%s:%s"%(station1,station2)
        if mseed:
            export_mseed(db, path, pair, components, filterid,CF/ncorr,ncorr)
        if sac:
            export_sac(db, path, pair, components, filterid,CF/ncorr,ncorr)
    
    else:
        file = '%s.cc' % time
        path = os.path.join(output_folder, "%02i"% filterid, station1, station2,  components,  date)
        if not os.path.isdir(path):
            os.makedirs(path)
        
        t = Trace()
        t.data = CF
        t.stats.sampling_rate = sampling_rate
        t.stats.starttime=-float(get_config(db,'maxlag'))
        t.stats.components = components
        # if ncorr != 0:
            # t.stats.location = "%02i"%ncorr
        st = Stream(traces= [t,])
        st.write(os.path.join(path, file),format='mseed')
        del t, st
Example #20
def add_corr(db,station1, station2, filterid, date, time, duration, components, CF, sampling_rate, day=False, ncorr=0):
    output_folder = get_config(db, 'output_folder')
    export_format = get_config(db, 'export_format')
    sac, mseed = False, False
    if export_format == "BOTH":
        mseed = True
        sac = True
    elif export_format == "SAC":
        mseed = False
        sac = True
    elif export_format == "MSEED":
        mseed = True
        sac = False

    if day:
        path = os.path.join("STACKS", "%02i" % filterid, "001_DAYS", components, "%s_%s" % (station1, station2), str(date))
        pair = "%s:%s" % (station1, station2)
        if mseed:
            export_mseed(db, path, pair, components, filterid, CF/ncorr, ncorr)
        if sac:
            export_sac(db, path, pair, components, filterid, CF/ncorr, ncorr)

    else:
        file = '%s.cc' % time
        path = os.path.join(output_folder, "%02i" % filterid, station1, station2, components, date)
        if not os.path.isdir(path):
            os.makedirs(path)

        t = Trace()
        t.data = CF
        t.stats.sampling_rate = sampling_rate
        t.stats.starttime = -float(get_config(db, 'maxlag'))
        t.stats.components = components
        # if ncorr != 0:
            # t.stats.location = "%02i"%ncorr
        st = Stream(traces=[t, ])
        st.write(os.path.join(path, file), format='mseed')
        del t, st
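Setting starttime to -maxlag encodes the lag axis of the correlation function: the trace spans 2*maxlag seconds with zero lag in the middle. A small sketch of that convention with assumed values (ObsPy converts the float to a UTCDateTime relative to the epoch):

import numpy as np
from obspy import Trace

maxlag, sampling_rate = 120.0, 20.0
cf = np.zeros(int(2 * maxlag * sampling_rate) + 1)
t = Trace(data=cf)
t.stats.sampling_rate = sampling_rate
t.stats.starttime = -maxlag                  # 120 s before the epoch
print(t.stats.starttime, t.stats.endtime)    # symmetric about the epoch, i.e. zero lag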
Example #21
 def getWaveform(*args, **kwargs):
     """
     Retrieves the waveforms and normalizes the graphs
     """
     # Check the two dates.
     try:
         st = UTCDateTime(NV.starttime.get())
     except:
         status_bar.configure(text='Please enter a valid start time.',
                              foreground='red')
         status_bar.update_idletasks()
         return
     try:
         ed = UTCDateTime(NV.endtime.get())
     except:
         status_bar.configure(text='Please enter a valid end time.',
                              foreground='red')
         status_bar.update_idletasks()
         return
     if ed - st <= 0:
         status_bar.configure(
              text='Start time needs to be earlier than end time.',
             foreground='red')
         status_bar.update_idletasks()
         return
     now = UTCDateTime()
     if now < st:
         status_bar.configure(text='You cannot plot the future...',
                              foreground='red')
         status_bar.update_idletasks()
         return
     if ed - st > MAX_SPAN:
         status_bar.configure(
             text='Timeframe too large. Maximal %s seconds allowed.' %
             MAX_SPAN,
             foreground='red')
         status_bar.update_idletasks()
         return
     stream_list = []
     if len(NV.selected_list) == 0:
         NV.stream = None
         create_graph()
         return
     status_bar.configure(text='Retrieving data...', foreground='black')
     status_bar.update_idletasks()
     for channel in NV.selected_list:
         # Read the waveform
         start = UTCDateTime(NV.starttime.get())
         end = UTCDateTime(NV.endtime.get())
         splitted = channel.split('.')
         network = splitted[0]
         station = splitted[1]
         location = splitted[2]
         channel = splitted[3]
         try:
             st = SH.client.waveform.getWaveform(network, station, location,
                                                 channel, start, end)
         except:
             trace = Trace(
                 header={
                     'network': network,
                     'station': station,
                     'location': location,
                     'channel': channel,
                     'starttime': start,
                     'endtime': end,
                     'npts': 0,
                     'sampling_rate': 1.0
                 })
             st = Stream(traces=[trace])
         st.merge()
         st.trim(start, end)
         stream_list.append(st)
     st = stream_list[0]
      for _i in range(1, len(stream_list)):
         st += stream_list[_i]
     # Merge the Stream and replace all masked values with NaNs.
     st.merge()
     st.sort()
     # Normalize all traces and throw out traces with no data.
     try:
         max_diff = max([trace.data.max() - trace.data.min() for trace in st \
                     if len(trace.data) > 0])
     except:
         pass
     for trace in st:
         if (np.ma.is_masked(trace.data) and not False in trace.data._mask)or\
             len(trace.data) == 0:
             trace.data = np.array([])
         else:
             trace.data = trace.data - trace.data.mean()
             trace.data = trace.data / (max_diff / 2)
     NV.stream = st
     # Get the min. starttime and the max. endtime.
     starttime = UTCDateTime(NV.starttime.get())
     endtime = UTCDateTime(NV.endtime.get())
     for trace in NV.stream:
         if np.ma.is_masked(trace):
             trace = trace.data[trace._mask] = np.NaN
     # Loop over all traces again and fill with NaNs.
     for trace in NV.stream:
         startgaps = int(round((trace.stats.starttime - starttime) * \
                             trace.stats.sampling_rate))
         endgaps = int(round((endtime - trace.stats.endtime) * \
                             trace.stats.sampling_rate))
          print(endgaps)
         if startgaps or endgaps:
             if startgaps > 0:
                 start = np.empty(startgaps)
                 start[:] = np.NaN
             else:
                 start = []
             if endgaps > 0:
                 end = np.empty(endgaps)
                 end[:] = np.NaN
             else:
                 end = []
             trace.data = np.concatenate([start, trace.data, end])
             trace.stats.npts = trace.data.size
             trace.stats.starttime = UTCDateTime(NV.starttime.get())
             #trace.stats.endtime = UTCDateTime(NV.endtime.get())
     status_bar.configure(text='')
     status_bar.update_idletasks()
     create_graph()
Example #22
def conv_traces(tr1, tr2, normal=True):
    """ Convolve two Traces and merge their meta-information

    It convolves the data stored in two :class:`~obspy.core.trace.Trace`
    Objects in frequency domain. If ``normal==True`` the resulting correlation
    data are normalized by a factor of
    :func:`sqrt(||tr1.data||^2 x ||tr2.data||^2)`

    Meta-informations associated to the resulting Trace are obtained through:

        - Merging the original meta-informations of the two input traces
          according to the :func:`~miic.core.corr_fun.combine_stats` function.

        - Adding the original two `Stats` objects to the newly
          created :class:`~obspy.core.trace.Trace` object as:
          >>> conv_tr.stats_tr1 = tr1.stats
          >>> conv_tr.stats_tr2 = tr2.stats
        - Fixing:
          >>> conv_tr.stats['npts'] = '...number of correlation points...'
          >>> conv_tr.stats['starttime'] = tr2.stats['starttime'] -
              tr1.stats['starttime']

    :type tr1: :class:`~obspy.core.trace.Trace`
    :param tr1: First Trace
    :type tr2: :class:`~obspy.core.trace.Trace`
    :param tr2: Second Trace
    :type normal: bool
    :param normal: Normalization flag

    :rtype: :class:`~obspy.core.trace.Trace`
    :return: **conv_tr**: Trace that stores convolved data and meta-information

    """

    if not isinstance(tr1, Trace):
        raise TypeError("tr1 must be an obspy Trace object.")

    if not isinstance(tr2, Trace):
        raise TypeError("tr2 must be an obspy Trace object.")
    
    zerotime = UTCDateTime(1971, 1, 1, 0, 0, 0)
    conv_tr = Trace()

    # extend traces to the next power of 2 of the longest trace
    lt = pow(2, np.ceil(np.log2(np.max([tr1.stats['npts'],
             tr2.stats['npts']]))))
    s1 = extend(tr1.data, method='zeros', length='fixed',size=lt)
    s2 = extend(tr2.data, method='zeros', length='fixed',size=lt)

    # create the combined stats
    conv_tr.stats = combine_stats(tr1, tr2)
    conv_tr.stats_tr1 = tr1.stats
    conv_tr.stats_tr2 = tr2.stats

    conv_tr.stats_tr1.npts = min(tr1.stats.npts, tr2.stats.npts)
    conv_tr.stats_tr2.npts = min(tr1.stats.npts, tr2.stats.npts)

    if normal:
        denom = np.sqrt(np.dot(s1.astype(np.float64), s1.T) *
                        np.dot(s2.astype(np.float64), s2.T))
    else:
        denom = 1.

    # remaining offset in samples (just remove fractions of samples)
    roffset = np.round((tr2.stats.starttime - tr1.stats.starttime) *
                        tr1.stats.sampling_rate)
    offset = (tr2.stats.starttime - tr1.stats.starttime) * \
        tr1.stats.sampling_rate - roffset
    # remaining offset in seconds
    roffset /= tr1.stats.sampling_rate


    convData = _fftconvolve(s1[::-1], s2, offset)
    convData = np.multiply(convData, (1 / denom))
    
    # set number of samples
    conv_tr.stats['npts'] = convData.shape[0]

    # time lag of the zero position, i.e. lag time of alignment
    t_offset_zeroleg = (float(convData.shape[0]) - 1.) / \
        (2. * tr1.stats.sampling_rate)

    # set starttime
    conv_tr.stats['starttime'] = zerotime - t_offset_zeroleg + \
        roffset

    conv_tr.data = convData

    return conv_tr
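The normalization factor in the docstring, sqrt(||tr1.data||^2 * ||tr2.data||^2), is just the product of the two L2 norms of the (zero-padded) signals; a quick numpy check of that identity:

import numpy as np

s1 = np.random.randn(1024)
s2 = np.random.randn(1024)
denom = np.sqrt(np.dot(s1, s1) * np.dot(s2, s2))
print(np.allclose(denom, np.linalg.norm(s1) * np.linalg.norm(s2)))   # True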
Example #23
    #else:
    #    for ia, a in enumerate(gminor):
    #  #         if ia >50:continue
    #        binfileA = [a]
    #        binfileB = [gmajor[ia]]
    #        c3 = c3_from_bin_fun(s1, s2, d, binfileA, binfileB)
    #        datac3 = np.vstack((datac3, c3))
    #        print np.shape(datac3)

    #corrc3 = stack(datac3, stack_method='linear')

    t = Trace()
    t.stats.station = 'pc3'
    t.stats.channel = '%03d' % day
    t.stats.sampling_rate = df
    t.data = np.array(c3[::-1])
    t.stats.starttime -= (len(c3) / 2) / df
    try:
        os.makedirs('pc3/EE/%s/%s/' % (s1, s2))
    except:
        pass
    t.write('pc3/EE/%s/%s/pc3.%s.%s.%03d.EE.mseed' % (s1, s2, s1, s2, day),
            format='MSEED')

if __name__ == '__main__':
    #staTarget1 = '235713'
    #staTarget2 = '236977'
    #depth = 0.
    prestackc3(sys.argv[1:])

#EOF
Example #24
def _source_specifc_interferogram(trnm1, trnm2, rec1, rec2, src, lags, **kwargs):
    """
    Construct a source-specific interferogram (:math:`C_3`), i.e.,
    interferogram of two :math:`I_2`.

    :param rec1: Name of the first receiver-station.
    :param rec2: Name of the second receiver-station.
    :param src: Name of the source-station.
    :param lags: Which lags to use for :math:`I_3`.
    :param dir_src: Whether to save the source direction to the SAC header.
    """
    dest = get_fnm('C3', rec1, sta2=rec2, sta3=src, lags=lags)

    if PARAM['skip']['C3'] and exists(dest):
        logger.debug(f'{dest} already exists.')
        if PARAM['write']['stack']:
            return dest, read(dest, format='SAC')[0]
        else:
            return None, None

    tr1 = DEST2LAG[trnm1]
    tr2 = DEST2LAG[trnm2]

    # Find common part
    if USE_CW:
        tr1, tr2 = overlap(tr1, tr2, lags)

    # Flip negative lag
    if PARAM['interferometry']['flip_nlag']:
        flip_nlag(tr1, tr2, lags)

    # Do interferometry
    if USE_DW and PARAM['interferometry']['phase_shift']:
        kwa_ps = {
            'delta': tr1.stats.delta,
            'dr': kwargs.get('dr'),
            'per': kwargs.get('phprper'),
            'pv': kwargs.get('phprvel'),
        }
        if CONV:
            C3 = xc_ps(tr1, tr2, **kwa_ps, **PARAM['interferometry'])
        elif CORR:
            xc = my.seis.x_cor(tr1, tr2, **PARAM['interferometry'])
            C3 = pick_lag(xc, kwargs.get('dir_src'))
            C3 = phase_shift(data=C3, **kwa_ps)
    else:
        C3 = my.seis.x_cor(tr1, tr2, **PARAM['interferometry'])
        if USE_DW and CORR and PARAM['interferometry']['pick_lag']:
            C3 = pick_lag(C3, kwargs.get('dir_src'))

    # Make header
    b = - int(np.floor(C3.size/2) / tr1.stats.delta)
    if USE_CW:
        if PARAM['interferometry'].get('Welch', False):
            b = - PARAM['interferometry']['subwin']
    if USE_DW:
        if CONV or PARAM['interferometry']['pick_lag']:
            b = 0
    if PARAM['interferometry']['symmetric'] or CONV:
        nsided = 1
    else:
        nsided = 2

    header = my.seis.sachd(**{
        'b': b,
        'e': int(b + C3.size*tr1.stats.delta),
        'delta': tr1.stats.delta,
        'npts': C3.size,
        'kevnm': rec1,
        'evlo': STNM2LOLA[rec1][0],
        'evla': STNM2LOLA[rec1][1],
        'knetwk': STA2NET[rec1],
        'kstnm': rec2,
        'stlo': STNM2LOLA[rec2][0],
        'stla': STNM2LOLA[rec2][1],
        'dist': kwargs.get('dist', DEF_SHD),
        KEY2SHD['net_rec']: STA2NET[rec2],  # to be consistent with I2
        KEY2SHD['src_sta']: src,
        KEY2SHD['src_net']: STA2NET[src],
        KEY2SHD['nsided']: nsided,
        KEY2SHD['dr']: kwargs.get('dr', DEF_SHD),
        KEY2SHD['theta']: kwargs.get('theta', DEF_SHD),
        KEY2SHD['dir_src']: kwargs.get('dir_src', DEF_SHD),
        KEY2SHD['min_srdist']: kwargs.get('min_srdist', DEF_SHD),
        })

    # Make Trace
    C3_tr = Trace(header=header, data=C3)
    if USE_CW and PARAM['interferometry']['symmetric']:
        C3_tr = my.seis.sym_xc(C3_tr)

    if CONV and PARAM['interferometry'].get('trim_conv', True):
        i = int(PARAM['cut']['te'] / PARAM['cut']['delta']) + 1
        C3_tr.data = C3_tr.data[:i]

    return dest, C3_tr
Example #25
tol = max(abs(filt_stf)) / 1000
i_max = np.argmax(abs(filt_stf))
ind = nstep - 1
while (filt_stf[ind] < tol):
    ind -= 1

delay = ind - i_max
print('Delay used: ' + str(delay))

# 2 : Apply delay and bandpass source time function
delayed_stf = np.zeros(nstep + delay)
delayed_stf[-nstep:] = np.array(stf)

resu = bandpass(delayed_stf, freqmin, freqmax, df, zerophase=True)
t = Trace()
t.data = resu
t.taper(0.005, type='hann')
resu = t.data

if resamp == 1:
    resu = resample(resu[:nstep], int(1.5 * nstep_resamp))[:nstep_resamp]

plt.figure(1)
plt.plot(resu)
#plt.figure(1)
#plt.plot(result)
plt.plot(delayed_stf)
plt.show()
stf = open("my_filtered_stf.txt", "w")

for i in range(0, nstep_resamp):  #nstep + delay):
Example #26
    def save_wave(self):

        # Fetch a wave from Ring 0
        wave = self.ring2buff.get_wave(0)

        # if wave is empty return
        if wave == {}:
            return

        # Let's try to buffer with Python dictionaries and ObsPy
        name = wave["station"] + '.' + wave["channel"] + '.' + wave[
            "network"] + '.' + wave["location"]

        if name in self.wave_buffer:

            # Determine max samples for buffer
            max_samp = wave["samprate"] * 60 * self.minutes

            # Create a header:
            wavestats = Stats()
            wavestats.station = wave["station"]
            wavestats.network = wave["network"]
            wavestats.channel = wave["channel"]
            wavestats.location = wave["location"]
            wavestats.sampling_rate = wave["samprate"]
            wavestats.starttime = UTCDateTime(wave['startt'])

            # Create a trace
            wavetrace = Trace(header=wavestats)
            wavetrace.data = wave["data"]

            # Try to append data to the buffer; shut down if there is a gap.
            try:
                self.wave_buffer[name].append(wavetrace,
                                              gap_overlap_check=True)
            except TypeError as err:
                logger.warning(err)
                self.runs = False
            except:
                raise
                self.runs = False

            # Debug data
            if self.debug:
                logger.info("Station Channel combo is in buffer:")
                logger.info(name)
                logger.info("Size:")
                logger.info(self.wave_buffer[name].count())
                logger.debug("Data:")
                logger.debug(self.wave_buffer[name])

        else:
            # First instance of data in buffer, create a header:
            wavestats = Stats()
            wavestats.station = wave["station"]
            wavestats.network = wave["network"]
            wavestats.channel = wave["channel"]
            wavestats.location = wave["location"]
            wavestats.sampling_rate = wave["samprate"]
            wavestats.starttime = UTCDateTime(wave['startt'])

            # Create a trace
            wavetrace = Trace(header=wavestats)
            wavetrace.data = wave["data"]

            # Create a RTTrace
            rttrace = RtTrace(int(self.minutes * 60))
            self.wave_buffer[name] = rttrace

            # Append data
            self.wave_buffer[name].append(wavetrace, gap_overlap_check=True)

            # Debug data
            if self.debug:
                logger.info("First instance of station/channel:")
                logger.info(name)
                logger.info("Size:")
                logger.info(self.wave_buffer[name].count())
                logger.debug("Data:")
                logger.debug(self.wave_buffer[name])
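The buffering idea above relies on obspy.realtime's RtTrace, which keeps at most a fixed number of seconds and appends incoming packets in place. A minimal, self-contained sketch with made-up packet sizes:

import numpy as np
from obspy import Trace, UTCDateTime
from obspy.realtime import RtTrace

rt = RtTrace(max_length=60)                          # keep at most 60 s of data
for k in range(3):
    tr = Trace(data=np.random.randn(100).astype(np.float32))
    tr.stats.sampling_rate = 100.0
    tr.stats.starttime = UTCDateTime(2020, 1, 1) + k     # contiguous 1 s packets
    rt.append(tr, gap_overlap_check=True)
print(rt.count())                                    # samples currently buffered (300)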
Example #27
    def test_SacInstCorrection(self):
        # SAC recommends to taper the transfer function if a pure
        # deconvolution is done instead of simulating a different
        # instrument. This test checks the difference between the
        # result from removing the instrument response using SAC or
        # ObsPy. Visual inspection shows that the traces are pretty
        # much identical but differences remain (rms ~ 0.042). Haven't
        # found the cause for those, yet. One possible reason is the
        # floating point arithmetic of SAC vs. the double precision
        # arithmetic of Python. However differences still seem to be
        # too big for that.
        pzf = os.path.join(self.path, 'SAC_PZs_KARC_BHZ')
        sacf = os.path.join(self.path, 'KARC.LHZ.SAC.asc.gz')
        testsacf = os.path.join(self.path, 'KARC_corrected.sac.asc.gz')
        plow = 160.
        phigh = 4.
        fl1 = 1.0 / (plow + 0.0625 * plow)
        fl2 = 1.0 / plow
        fl3 = 1.0 / phigh
        fl4 = 1.0 / (phigh - 0.25 * phigh)
        #Uncomment the following to run the sac-commands
        #that created the testing file
        #if 1:
        #    import subprocess as sp
        #    p = sp.Popen('sac',shell=True,stdin=sp.PIPE)
        #    cd1 = p.stdin
        #    print >>cd1, "r %s"%sacf
        #    print >>cd1, "rmean"
        #    print >>cd1, "rtrend"
        #    print >>cd1, "taper type cosine width 0.03"
        #    print >>cd1, "transfer from polezero subtype %s to none \
        #    freqlimits %f %f %f %f" % (pzf, fl1, fl2, fl3, fl4)
        #    print >>cd1, "w over ./data/KARC_corrected.sac"
        #    print >>cd1, "quit"
        #    cd1.close()
        #    p.wait()

        stats = {
            'network': 'KA',
            'delta': 0.99999988079072466,
            'station': 'KARC',
            'location': 'S1',
            'starttime': UTCDateTime(2001, 2, 13, 0, 0, 0, 993700),
            'calib': 1.00868e+09,
            'channel': 'BHZ'
        }
        tr = Trace(np.loadtxt(sacf), stats)

        attach_paz(tr, pzf, tovel=False)
        tr.data = seisSim(tr.data,
                          tr.stats.sampling_rate,
                          paz_remove=tr.stats.paz,
                          remove_sensitivity=False,
                          pre_filt=(fl1, fl2, fl3, fl4))

        data = np.loadtxt(testsacf)

        # import matplotlib.pyplot as plt
        # plt.plot(tr.data)
        # plt.plot(data)
        # plt.show()
        rms = np.sqrt(np.sum((tr.data - data) ** 2) / \
                      np.sum(tr.data ** 2))
        self.assertTrue(rms < 0.0421)
Example #29
def add_corr(session, station1, station2, filterid, date, time, duration,
             components, CF, sampling_rate, day=False, ncorr=0):
    """
    Adds a CCF to the data archive on disk.
    
    :type session: :class:`sqlalchemy.orm.session.Session`
    :param session: A :class:`~sqlalchemy.orm.session.Session` object, as
        obtained by :func:`connect`
    :type station1: str
    :param station1: The name of station 1 (formatted NET.STA)
    :type station2: str
    :param station2: The name of station 2 (formatted NET.STA)
    :type filterid: int
    :param filterid: The ID (ref) of the filter
    :type date: datetime.date or str
    :param date: The date of the CCF
    :type time: datetime.time or str
    :param time: The time of the CCF
    :type duration: float
    :param duration: The total duration of the exported CCF
    :type components: str
    :param components: The name of the components used (ZZ, ZR, ...)
    :type sampling_rate: float
    :param sampling_rate: The sampling rate of the exported CCF
    :type day: bool
    :param day: Whether this function is called to export a daily stack (True)
        or each CCF (when keep_all parameter is set to True in the
        configuration). Defaults to False.
    :type ncorr: int
    :param ncorr: Number of CCF that have been stacked for this CCF.
    """

    output_folder = get_config(session, 'output_folder')
    export_format = get_config(session, 'export_format')
    sac, mseed = False, False
    if export_format == "BOTH":
        mseed = True
        sac = True
    elif export_format == "SAC":
        sac = True
    elif export_format == "MSEED":
        mseed = True

    if day:
        path = os.path.join("STACKS", "%02i" % filterid, "001_DAYS", components,
                            "%s_%s" % (station1, station2), str(date))
        pair = "%s:%s" % (station1, station2)
        if mseed:
            export_mseed(session, path, pair, components, filterid, CF,
                         ncorr)
        if sac:
            export_sac(session, path, pair, components, filterid, CF,
                       ncorr)

    else:
        file = '%s.cc' % time
        path = os.path.join(output_folder, "%02i" % filterid, station1,
                            station2, components, date)
        if not os.path.isdir(path):
            os.makedirs(path)

        t = Trace()
        t.data = CF
        t.stats.sampling_rate = sampling_rate
        t.stats.starttime = -float(get_config(session, 'maxlag'))
        t.stats.components = components
        # if ncorr != 0:
            # t.stats.location = "%02i"%ncorr
        st = Stream(traces=[t, ])
        st.write(os.path.join(path, file), format='mseed')
        del t, st
Example #30
def stream_collapse_tr(st):

    if not isinstance(st, Stream):
        raise InputError("'st' must be a 'obspy.core.stream.Stream' object")

    stream_new = Stream()
    # Generate sorted list of traces (no copy)
    # Sort order, id, starttime, endtime
    ids = []
    for tr in st:
        if tr.id not in ids:
            ids.append(tr.id)
    for id in ids:
        print "new_trace id: %s" % id
        tr_new = Trace()
        tr_new.data = np.zeros(st[0].data.shape)
#        tr_new.stats = {}
        tr_new.stats_tr1 = {}
        tr_new.stats_tr2 = {}
        starttime1_list = []
        starttime2_list = []
        endtime1_list = []
        endtime2_list = []
        n_tr = 0
        for tr in st:
            if tr.id == id:
                print(tr.id)
                if len(tr_new.data) != len(tr.data):
                    lp = len(tr_new.data) - len(tr.data)
                    print("lp: %d" % lp)
                    if lp > 0:
                        left = int(np.ceil(lp / 2))
                        right = lp - left
                        cdata = np.append(np.zeros(left, dtype=tr.data.dtype),
                                          tr.data)
                        tr.data = np.append(cdata,
                                            np.zeros(right,
                                                     dtype=tr.data.dtype))
                    else:
                        lp = -lp
                        left = int(np.ceil(lp / 2))
                        right = lp - left
                        tr.data = tr.data[left:-right]
                    print("len tr: %d" % len(tr))
                    print("len tr_new: %d" % len(tr_new))
                tr_new.data += tr.data
                n_tr += 1
                starttime1_list.append(tr.stats_tr1.starttime)
                starttime2_list.append(tr.stats_tr2.starttime)
                endtime1_list.append(tr.stats_tr1.endtime)
                endtime2_list.append(tr.stats_tr2.endtime)

                tr_new.stats.update(tr.stats)
                tr_new.stats_tr1.update(tr.stats_tr1)
                tr_new.stats_tr2.update(tr.stats_tr2)
        tr_new.data /= n_tr
        tr_new.stats['starttime1'] = starttime1_list
        tr_new.stats['starttime2'] = starttime2_list
        tr_new.stats['endtime1'] = endtime1_list
        tr_new.stats['endtime2'] = endtime2_list
        stream_new.append(tr_new)

    return stream_new
Example #31
def kutec_read(fname):
    """ Read the K-UTec proprietary file format.

    Read data in the K-UTec specific IMC FAMOS format into a stream object.
    As there is no obvious station information in the data file, the Network
    is set to KU and the Station is set to the first five letters of the
    filename.

    :parameters:
    ------------
    fname : string
        path to the file containing the data to be read

    .. rubric:: Returns

    st : obspy.core.Stream object
        Obspy stream object containing the data

    """
    tr = Trace()

    line = []
    keys = {}
    f = open(fname, 'r')
    char = f.read(1)  # read leading '|'
    while char == '|':
        key = []
        cnt = 0
        while 1:
            key.append(f.read(1))
            if key[-1] == ',':
                cnt += 1
            if cnt == 3:
                break
        tkeys = ''.join(key).split(',')
        key.append(f.read(int(tkeys[2])))
        keyline = ''.join(key)
        f.read(1)  # read terminating ';'
        char = f.read(1)  # read leading '|'
        # print char
        while (char == '\r') or (char == '\n'):
            char = f.read(1)  # read leading '|'
        #    print char
        keyval = keyline.split(',')
        # ######
        # # in the post 20120619 version files there are leading
        # linefeed in the key (\n), remove them here
        if keyval[0].startswith('\n|'):
            print "does this happen", keyval
            keyval[0] = keyval[0][2:]

        if keyval[0] == 'CF':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Dateiformat'] = int(keyval[1])
            keys[keyval[0]]['Keylaenge'] = int(keyval[2])
            keys[keyval[0]]['Prozessor'] = int(keyval[3])
        elif keyval[0] == 'CK':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Dump'] = keyval[3]
            keys[keyval[0]]['Abgeschlossen'] = int(keyval[3])
            if keys[keyval[0]]['Abgeschlossen'] != 1:
                print "%s %s = %s not implemented." % (keyval[0], \
                        'Abgeschlossen', keys[keyval[0]]['DirekteFolgeAnzahl'])
        elif keyval[0] == 'NO':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Ursprung'] = int(keyval[3])
            keys[keyval[0]]['NameLang'] = int(keyval[4])
            keys[keyval[0]]['Name'] = keyval[5]
            keys[keyval[0]]['KommLang'] = int(keyval[6])
            if keys[keyval[0]]['KommLang']:
                keys[keyval[0]]['Kommentar'] = keyval[7]
        elif keyval[0] == 'CP':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['BufferReferenz'] = int(keyval[3])
            keys[keyval[0]]['Bytes'] = int(keyval[4])  # bytes per sample value
            keys[keyval[0]]['ZahlenFormat'] = int(keyval[5])
            keys[keyval[0]]['SignBits'] = int(keyval[6])
            keys[keyval[0]]['Maske'] = int(keyval[7])
            keys[keyval[0]]['Offset'] = int(keyval[8])
            keys[keyval[0]]['DirekteFolgeAnzahl'] = int(keyval[9])
            keys[keyval[0]]['AbstandBytes'] = int(keyval[10])
            if keys[keyval[0]]['DirekteFolgeAnzahl'] != 1:
                print "%s %s = %s not implemented." % (keyval[0], \
                   'DirekteFolgeAnzahl', keys[keyval[0]]['DirekteFolgeAnzahl'])
                break

        elif keyval[0] == 'Cb':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['AnzahlBufferInKey'] = int(keyval[3])
            if keys[keyval[0]]['AnzahlBufferInKey'] != 1:
                print "%s %s = %d not implemented." % (keyval[0], \
                    'AnzahlBufferInKey', keys[keyval[0]]['AnzahlBufferInKey'])
                break
            keys[keyval[0]]['BytesInUserInfo'] = int(keyval[4])
            keys[keyval[0]]['BufferReferenz'] = int(keyval[5])
            keys[keyval[0]]['IndexSampleKey'] = int(keyval[6])
            keys[keyval[0]]['OffsetBufferInSampleKey'] = int(keyval[7])
            if keys[keyval[0]]['OffsetBufferInSampleKey'] != 0:
                print "%s %s = %d not implemented." % (keyval[0], \
                                    'OffsetBufferInSampleKey', \
                                    keys[keyval[0]]['OffsetBufferInSampleKey'])
                break
            keys[keyval[0]]['BufferLangBytes'] = int(keyval[8])
            keys[keyval[0]]['OffsetFirstSampleInBuffer'] = int(keyval[9])
            if keys[keyval[0]]['OffsetFirstSampleInBuffer'] != 0:
                print "%s %s = %d not implemented." % (keyval[0], \
                                'OffsetFirstSampleInBuffer', \
                                keys[keyval[0]]['OffsetFirstSampleInBuffer'])
                break
            keys[keyval[0]]['BufferFilledBytes'] = int(keyval[10])
            keys[keyval[0]]['x0'] = float(keyval[12])
            keys[keyval[0]]['Addzeit'] = float(keyval[13])
            if keys[keyval[0]]['BytesInUserInfo']:
                keys[keyval[0]]['UserInfo'] = int(keyval[14])
        elif keyval[0] == 'CS':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['AnzahlBufferInKey'] = int(keyval[3])
            tmp = ','.join(keyval[4:])
            keys[keyval[0]]['Rohdaten'] = tmp

            npts = keys['Cb']['BufferFilledBytes'] // keys['CP']['Bytes']
            tr.stats['npts'] = npts
            # allocate array
            tr.data = np.ndarray(npts, dtype=float)
            # treat different number formats
            if keys['CP']['ZahlenFormat'] == 4:
                tmp = np.fromstring(keys['CS']['Rohdaten'], dtype='uint8', \
                                count=npts * 2)
                tr.data = (tmp[0::2].astype(float) + \
                       (tmp[1::2].astype(float) * 256))
                tr.data[np.nonzero(tr.data > 32767)] -= 65536
            elif keys['CP']['ZahlenFormat'] == 8:
                tr.data = np.fromstring(keys['CS']['Rohdaten'],
                                        dtype='float64',
                                        count=npts)
            else:
                print "%s %s = %d not implemented." % (keyval[0], \
                             'ZahlenFormat', keys[keyval[0]]['ZahlenFormat'])
                break

        elif keyval[0] == 'NT':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Tag'] = int(keyval[3])
            keys[keyval[0]]['Monat'] = int(keyval[4])
            keys[keyval[0]]['Jahr'] = int(keyval[5])
            keys[keyval[0]]['Stunden'] = int(keyval[6])
            keys[keyval[0]]['Minuten'] = int(keyval[7])
            keys[keyval[0]]['Sekunden'] = float(keyval[8])
            tr.stats['starttime'] = UTCDateTime(keys[keyval[0]]['Jahr'], \
                                                keys[keyval[0]]['Monat'], \
                                                keys[keyval[0]]['Tag'], \
                                                keys[keyval[0]]['Stunden'], \
                                                keys[keyval[0]]['Minuten'], \
                                                keys[keyval[0]]['Sekunden'])
        elif keyval[0] == 'CD':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['dx'] = float(keyval[3])
            tr.stats['delta'] = keys[keyval[0]]['dx']
            keys[keyval[0]]['kalibiert'] = int(keyval[4])
            if keys[keyval[0]]['kalibiert'] != 1:
                print "%s %s = %d not implemented." % \
                    (keyval[0], 'kalibiert',
                     keys[keyval[0]]['kalibiert'])
                break
            keys[keyval[0]]['EinheitLang'] = int(keyval[5])
            keys[keyval[0]]['Einheit'] = keyval[6]

            if keys[keyval[0]]['Version'] == 2:
                keys[keyval[0]]['Reduktion'] = int(keyval[7])
                keys[keyval[0]]['InMultiEvents'] = int(keyval[8])
                keys[keyval[0]]['SortiereBuffer'] = int(keyval[9])
                keys[keyval[0]]['x0'] = float(keyval[10])
                keys[keyval[0]]['PretriggerVerwendung'] = int(keyval[11])
            if keys[keyval[0]]['Version'] == 1:
                keys[keyval[0]]['Reduktion'] = ''
                keys[keyval[0]]['InMultiEvents'] = ''
                keys[keyval[0]]['SortiereBuffer'] = ''
                keys[keyval[0]]['x0'] = ''
                keys[keyval[0]]['PretriggerVerwendung'] = 0

        elif keyval[0] == 'CR':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Transformieren'] = int(keyval[3])
            keys[keyval[0]]['Faktor'] = float(keyval[4])
            keys[keyval[0]]['Offset'] = float(keyval[5])
            keys[keyval[0]]['Kalibriert'] = int(keyval[6])
            keys[keyval[0]]['EinheitLang'] = int(keyval[7])
            keys[keyval[0]]['Einheit'] = keyval[8]
        elif keyval[0] == 'CN':  # station names
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['IndexGruppe'] = int(keyval[3])
            keys[keyval[0]]['IndexBit'] = int(keyval[5])
            keys[keyval[0]]['NameLang'] = int(keyval[6])
            keys[keyval[0]]['Name'] = keyval[7]
            keys[keyval[0]]['KommLang'] = int(keyval[8])
            keys[keyval[0]]['Kommentar'] = keyval[9]
        else:
            keys[keyval[0]] = {}
            keys[keyval[0]]['KeyString'] = keyval[1:]

    # The NT key marks the beginning of the measurement (start-up of the
    # measuring device); keys['Cb']['Addzeit'] needs to be added to obtain
    # the absolute trigger time

    tr.stats['starttime'] += keys['Cb']['Addzeit']

    # Adjust starttime according to pretrigger (There is some uncertainty
    # about the CD key) to get relative trigger time
    # for CD:Version == 1 always use Cb:x0
    # for CD:Version == 2 only use Cb:x0 if CD:PretriggerVerwendung == 1
    if keys['CD']['Version'] == 1 or \
        (keys['CD']['Version'] == 2 and
         keys['CD']['PretriggerVerwendung'] == 1):
        tr.stats['starttime'] += keys['Cb']['x0']

    if 'CR' in keys:
        if keys['CR']['Transformieren']:
            tr.data = tr.data * keys['CR']['Faktor'] + keys['CR']['Offset']

    f.close()
    # ### Channel naming
    tr.stats['network'] = 'KU'
    tr.stats['location'] = ''
    # ### Pre-20120619 naming convention to extract the station name from the
    # filename
    # tr.stats['station'] = fname[-12:-7]
    # ### Now take the station name from the CN key
    tr.stats['station'] = keys['CN']['Name'].replace('_', '')
    # ### or construct a name, from the key, that is consistent with the old
    # filename-generated one
    # ### This is very likely to cause a problem sooner or later.
    # tr.stats['station'] = 'MK%03d' % int(keys['CN']['Name'].split('_')[-1])

    st = Stream()
    st.extend([tr])

    return st
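
The 16-bit branch above (ZahlenFormat == 4) assembles each sample from two
unsigned bytes and then wraps values above 32767 into the negative range. The
sketch below only illustrates that this is equivalent to viewing the raw bytes
as little-endian signed 16-bit integers; the byte string is a made-up stand-in
for keys['CS']['Rohdaten'].

import numpy as np

# Hypothetical raw buffer: two little-endian 16-bit samples, 1 and -2.
raw = b'\x01\x00\xfe\xff'
npts = 2

# Manual decoding, as done in the reader above.
tmp = np.frombuffer(raw, dtype='uint8', count=npts * 2)
data_manual = tmp[0::2].astype(float) + tmp[1::2].astype(float) * 256
data_manual[data_manual > 32767] -= 65536

# Equivalent decoding with a signed little-endian int16 dtype.
data_view = np.frombuffer(raw, dtype='<i2', count=npts).astype(float)

assert np.array_equal(data_manual, data_view)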
Example #32
0
def readSU(filename,
           headonly=False,
           byteorder=None,
           unpack_trace_headers=False,
           **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Unix (SU) file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SU file to be read.
    :type headonly: boolean, optional
    :param headonly: If set to True, read only the header and omit the waveform
        data.
    :type byteorder: ``'<'``, ``'>'``, or ``None``
    :param byteorder: Determines the endianness of the file. Either ``'>'`` for
        big endian or ``'<'`` for little endian. If it is ``None``, it will try
        to autodetect the endianness. The endianness is always valid for the
        whole file. Defaults to ``None``.
    :type unpack_trace_headers: bool, optional
    :param unpack_trace_headers: Determines whether or not all trace header
        values will be unpacked during reading. If ``False`` it will greatly
        enhance performance and especially memory usage with large files. The
        header values can still be accessed and will be calculated on the fly
        but tab completion will no longer work. Look in the headers.py for a
        list of all possible trace header values. Defaults to ``False``.
    :returns: An ObsPy :class:`~obspy.core.stream.Stream` object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/1.su_first_trace")
    >>> st #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  #doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    ... | 2005-12-19T15:07:54.000000Z - ... | 4000.0 Hz, 8000 samples
    """
    # Read file to the internal segy representation.
    su_object = readSUFile(filename,
                           endian=byteorder,
                           unpack_headers=unpack_trace_headers)

    # Create the stream object.
    stream = Stream()

    # Get the endianness from the first trace.
    endian = su_object.traces[0].endian
    # Loop over all traces.
    for tr in su_object.traces:
        # Create new Trace object for every segy trace and append to the Stream
        # object.
        trace = Trace()
        stream.append(trace)
        # skip data if headonly is set
        if headonly:
            trace.stats.npts = tr.npts
        else:
            trace.data = tr.data
        trace.stats.su = AttribDict()
        # If all values will be unpacked create a normal dictionary.
        if unpack_trace_headers:
            # Add the trace header as a new attrib dictionary.
            header = AttribDict()
            for key, value in tr.header.__dict__.iteritems():
                setattr(header, key, value)
        # Otherwise use the LazyTraceHeaderAttribDict.
        else:
            # Add the trace header as a new lazy attrib dictionary.
            header = LazyTraceHeaderAttribDict(tr.header.unpacked_header,
                                               tr.header.endian)
        trace.stats.su.trace_header = header
        # Also set the endianness.
        trace.stats.su.endian = endian
        # The sampling rate should be set for every trace. It is stored as a
        # sample interval in microseconds. The only sanity check is that it
        # should be larger than 0.
        tr_header = trace.stats.su.trace_header
        if tr_header.sample_interval_in_ms_for_this_trace > 0:
            trace.stats.delta = \
                    float(tr.header.sample_interval_in_ms_for_this_trace) / \
                    1E6
        # If the year is not zero, calculate the start time. The end time is
        # then calculated from the start time and the sampling rate.
        # 99 is often used as a placeholder.
        if tr_header.year_data_recorded > 0:
            year = tr_header.year_data_recorded
            # The SEG Y rev 0 standard specifies the year to be a 4 digit
            # number.  Before that it was unclear if it should be a 2 or 4
            # digit number. Old or wrong software might still write 2 digit
            # years. Every number <30 will be mapped to 2000-2029 and every
            # number between 30 and 99 will be mapped to 1930-1999.
            if year < 100:
                if year < 30:
                    year += 2000
                else:
                    year += 1900
            julday = tr_header.day_of_year
            hour = tr_header.hour_of_day
            minute = tr_header.minute_of_hour
            second = tr_header.second_of_minute
            trace.stats.starttime = UTCDateTime(year=year,
                                                julday=julday,
                                                hour=hour,
                                                minute=minute,
                                                second=second)
    return stream
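
A hedged usage sketch for the SU reader above: the file path is a placeholder,
and the header field name follows the SU trace-header naming used in the
function. Passing unpack_trace_headers=True through obspy's read() unpacks all
header values up front instead of using the lazy dictionary.

from obspy.core import read

# Path is hypothetical; any Seismic Unix file will do.
st = read("/path/to/1.su_first_trace", unpack_trace_headers=True)
tr = st[0]
print(tr.stats.su.endian)
print(tr.stats.su.trace_header.sample_interval_in_ms_for_this_trace)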
Example #33
0
File: core.py Project: egdorf/obspy
def readSU(filename, headonly=False, byteorder=None,
           unpack_trace_headers=False, **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Unix (SU) file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SU file to be read.
    :type headonly: boolean, optional
    :param headonly: If set to True, read only the header and omit the waveform
        data.
    :type byteorder: ``'<'``, ``'>'``, or ``None``
    :param byteorder: Determines the endianness of the file. Either ``'>'`` for
        big endian or ``'<'`` for little endian. If it is ``None``, it will try
        to autodetect the endianness. The endianness is always valid for the
        whole file. Defaults to ``None``.
    :type unpack_trace_headers: bool, optional
    :param unpack_trace_headers: Determines whether or not all trace header
        values will be unpacked during reading. If ``False`` it will greatly
        enhance performance and especially memory usage with large files. The
        header values can still be accessed and will be calculated on the fly
        but tab completion will no longer work. Look in the headers.py for a
        list of all possible trace header values. Defaults to ``False``.
    :returns: An ObsPy :class:`~obspy.core.stream.Stream` object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/1.su_first_trace")
    >>> st #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  #doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    ... | 2005-12-19T15:07:54.000000Z - ... | 4000.0 Hz, 8000 samples
    """
    # Read file to the internal segy representation.
    su_object = readSUFile(filename, endian=byteorder,
                           unpack_headers=unpack_trace_headers)

    # Create the stream object.
    stream = Stream()

    # Get the endianness from the first trace.
    endian = su_object.traces[0].endian
    # Loop over all traces.
    for tr in su_object.traces:
        # Create new Trace object for every segy trace and append to the Stream
        # object.
        trace = Trace()
        stream.append(trace)
        # skip data if headonly is set
        if headonly:
            trace.stats.npts = tr.npts
        else:
            trace.data = tr.data
        trace.stats.su = AttribDict()
        # If all values will be unpacked create a normal dictionary.
        if unpack_trace_headers:
            # Add the trace header as a new attrib dictionary.
            header = AttribDict()
            for key, value in tr.header.__dict__.iteritems():
                setattr(header, key, value)
        # Otherwise use the LazyTraceHeaderAttribDict.
        else:
            # Add the trace header as a new lazy attrib dictionary.
            header = LazyTraceHeaderAttribDict(tr.header.unpacked_header,
                                               tr.header.endian)
        trace.stats.su.trace_header = header
        # Also set the endianness.
        trace.stats.su.endian = endian
        # The sampling rate should be set for every trace. It is stored as a
        # sample interval in microseconds. The only sanity check is that it
        # should be larger than 0.
        tr_header = trace.stats.su.trace_header
        if tr_header.sample_interval_in_ms_for_this_trace > 0:
            trace.stats.delta = \
                    float(tr.header.sample_interval_in_ms_for_this_trace) / \
                    1E6
        # If the year is not zero, calculate the start time. The end time is
        # then calculated from the start time and the sampling rate.
        # 99 is often used as a placeholder.
        if tr_header.year_data_recorded > 0:
            year = tr_header.year_data_recorded
            # The SEG Y rev 0 standard specifies the year to be a 4 digit
            # number.  Before that it was unclear if it should be a 2 or 4
            # digit number. Old or wrong software might still write 2 digit
            # years. Every number <30 will be mapped to 2000-2029 and every
            # number between 30 and 99 will be mapped to 1930-1999.
            if year < 100:
                if year < 30:
                    year += 2000
                else:
                    year += 1900
            julday = tr_header.day_of_year
            hour = tr_header.hour_of_day
            minute = tr_header.minute_of_hour
            second = tr_header.second_of_minute
            trace.stats.starttime = UTCDateTime(year=year, julday=julday,
                                    hour=hour, minute=minute, second=second)
    return stream
Example #34
0
def makeSynt(st,mti,mo,args):

    from obspy.core import Stream,Trace
    from math import cos,sin,radians
    import numpy as np
    import sys

    mxx = mti[0] 
    myy = mti[1] 
    mxy = mti[2]
    mxz = mti[3]
    myz = mti[4] 
    mzz = mti[5]
    mo  = 1.0*mo 
    MM  = mo/(1e20)
    MM  = 1.0

   
    # initialize
    synt = Stream()
    staz_list = []

    # npts
    npts  = st[0].stats.npts
    delta = st[0].stats.delta


    # acquire station list
    for i in range(len(st)):
        if (st[i].stats.channel == 'tss'): 
           staz_list.append(st[i].stats.station)
    
    # for each station of list: create 3 traces: ver,rad,tan
    # compute time elements for each one and update stats


    # make synt. The component loop over npts on syn.data is repeated 3 times
    # because of some pointer confusion (mine)
    k=1
    for l in range(len(staz_list)):


        dati  = np.arange(npts)*0.0  #!!! initialize with floats
        az = radians(st[l*10+k].stats.az)

        ###############
        # TAN Component
        syn   = Trace(dati)
        for i in range(npts):

            syn.data[i] =    mxx*0.5*st[l*10+k*0].data[i]*sin(2*az) \
                           - myy*0.5*st[l*10+k*0].data[i]*sin(2*az) \
                           - mxy*1.0*st[l*10+k*0].data[i]*cos(2*az) \
                           - mxz*1.0*st[l*10+k*1].data[i]*sin(1*az) \
                           + myz*1.0*st[l*10+k*1].data[i]*cos(1*az) 

        # apply Mo
        syn.data=syn.data*MM*(-1)

        # update stats
        syn.stats.station = st[l*10].stats.station    
        syn.stats.channel = 'TAN'   
        syn.stats.az      = st[l*10].stats.az    
        syn.stats.baz     = st[l*10].stats.baz    
        syn.stats.dist    = st[l*10].stats.dist    
        syn.stats.gcarc   = st[l*10].stats.gcarc    
        syn.stats.evla    = st[l*10].stats.evla    
        syn.stats.evlo    = st[l*10].stats.evlo    
        syn.stats.stlo    = st[l*10].stats.stlo    
        syn.stats.stla    = st[l*10].stats.stla    
        syn.stats.delta   = delta

        # add to synt stream
        synt.append(syn) 


        ###############
        # RAD Component
        syn   = Trace(dati)
        for i in range(npts):

            syn.data[i] =    mxx*1/6*st[l*10+k*4].data[i]*(+1) \
                           - mxx*0.5*st[l*10+k*2].data[i]*cos(2*az) \
                           + mxx*1/3*st[l*10+k*8].data[i] \
                           + myy*1/6*st[l*10+k*4].data[i]*(+1) \
                           + myy*0.5*st[l*10+k*2].data[i]*cos(2*az) \
                           + myy*1/3*st[l*10+k*8].data[i] \
                           + mzz*1/3*st[l*10+k*8].data[i] \
                           - mzz*1/3*st[l*10+k*4].data[i]*(+1) \
                           - mxy*1.0*st[l*10+k*2].data[i]*sin(2*az) \
                           + mxz*1.0*st[l*10+k*3].data[i]*cos(1*az) \
                           + myz*1.0*st[l*10+k*3].data[i]*sin(1*az)

        # apply Mo
        syn.data=syn.data*MM*(-1)

        # update stats
        syn.stats.station = st[l*10].stats.station    
        syn.stats.channel = 'RAD'   
        syn.stats.az      = st[l*10].stats.az    
        syn.stats.baz     = st[l*10].stats.baz    
        syn.stats.dist    = st[l*10].stats.dist    
        syn.stats.gcarc   = st[l*10].stats.gcarc    
        syn.stats.evla    = st[l*10].stats.evla    
        syn.stats.evlo    = st[l*10].stats.evlo    
        syn.stats.stlo    = st[l*10].stats.stlo    
        syn.stats.stla    = st[l*10].stats.stla    
        syn.stats.delta   = delta

        # add to synt stream
        synt.append(syn) 


        ###############
        # VER Component
        syn   = Trace(dati)
        for i in range(npts):

            syn.data[i] =    mxx*1/6*st[l*10+k*7].data[i] \
                           - mxx*0.5*st[l*10+k*5].data[i]*(+1)*cos(2*az) \
                           + mxx*1/3*st[l*10+k*9].data[i] \
                           + myy*1/6*st[l*10+k*7].data[i] \
                           + myy*0.5*st[l*10+k*5].data[i]*(+1)*cos(2*az) \
                           + myy*1/3*st[l*10+k*9].data[i] \
                           + mzz*1/3*st[l*10+k*9].data[i] \
                           - mzz*1/3*st[l*10+k*7].data[i] \
                           - mxy*1.0*st[l*10+k*5].data[i]*(+1)*sin(2*az) \
                           + mxz*1.0*st[l*10+k*6].data[i]*(+1)*cos(1*az) \
                           + myz*1.0*st[l*10+k*6].data[i]*(+1)*sin(1*az)

        # apply Mo
        syn.data=syn.data*MM*(+1)

        # update stats
        syn.stats.station = st[l*10].stats.station   
        syn.stats.channel = 'VER'   
        syn.stats.az      = st[l*10].stats.az    
        syn.stats.baz     = st[l*10].stats.baz    
        syn.stats.dist    = st[l*10].stats.dist    
        syn.stats.gcarc   = st[l*10].stats.gcarc    
        syn.stats.evla    = st[l*10].stats.evla
        syn.stats.evlo    = st[l*10].stats.evlo
        syn.stats.stlo    = st[l*10].stats.stlo
        syn.stats.stla    = st[l*10].stats.stla
        syn.stats.delta   = delta

        # add to synt stream
        synt.append(syn) 
        
    return synt
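
The per-sample loops above can be written as vectorized NumPy expressions; the
weights below are copied from the TAN branch of the code. This is only a
sketch of that single component, and the names g_ss and g_ds are assumptions
standing in for the two Green's function traces st[l*10+k*0] and st[l*10+k*1].

import numpy as np
from math import sin, cos

def tan_synthetic(g_ss, g_ds, mxx, myy, mxy, mxz, myz, az):
    """Tangential-component synthetic for one station (az in radians)."""
    g_ss = np.asarray(g_ss, dtype=float)
    g_ds = np.asarray(g_ds, dtype=float)
    syn = (mxx * 0.5 * g_ss * sin(2 * az)
           - myy * 0.5 * g_ss * sin(2 * az)
           - mxy * 1.0 * g_ss * cos(2 * az)
           - mxz * 1.0 * g_ds * sin(az)
           + myz * 1.0 * g_ds * cos(az))
    # same sign convention as the loop version (MM is 1.0 above)
    return syn * (-1)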
Example #35
0
def create_kiknet_acc(recid, path_kiknet_folder, fminNS2, fmaxNS2, fminEW2,
                      fmaxEW2):
    """
    KiK-net accelerograms are stored within the Database_small.hdf5 file
    """

    # Import libraries
    import numpy as np
    from obspy.core import Trace, UTCDateTime
    import re
    import sys
    from obspy.signal import filter

    # desc1 = ""
    # desc2 = ""
    time1 = []
    time2 = []
    inp_acc1 = []
    inp_acc2 = []
    npts1 = []
    npts2 = []
    for i in range(1, 3):
        if i == 1:
            comp = 'EW2'
            fmin = fminEW2
            fmax = fmaxEW2
        elif i == 2:
            comp = 'NS2'
            fmin = fminNS2
            fmax = fmaxNS2

        file_acc = path_kiknet_folder + '/' + str(recid) + '/' + str(
            recid) + '.' + comp
        hdrnames = [
            'Origin Time', 'Lat.', 'Long.', 'Depth. (km)', 'Mag.',
            'Station Code', 'Station Lat.', 'Station Long.',
            'Station Height(m)', 'Record Time', 'Sampling Freq(Hz)',
            'Duration Time(s)', 'Dir.', 'Scale Factor', 'Max. Acc. (gal)',
            'Last Correction', 'Memo.'
        ]
        acc_data = []
        time = []
        with open(file_acc, 'r') as f:
            content = f.readlines()
        counter = 0
        for line in content:
            if counter < 17:
                if not line.startswith(hdrnames[counter]):
                    sys.exit("Expected line to start with %s but got %s " %
                             (hdrnames[counter], line))
                else:
                    flds = line.split()

            if (counter == 0):
                origin_time = flds[2] + ' ' + flds[3]
                origin_time = UTCDateTime.strptime(origin_time,
                                                   '%Y/%m/%d %H:%M:%S')
                # All times are in Japanese standard time which is 9 hours ahead of UTC
                origin_time -= 9 * 3600.

            elif (counter == 1):
                lat = float(flds[1])

            elif (counter == 2):
                lon = float(flds[1])

            elif (counter == 3):
                dp = float(flds[2])

            elif (counter == 4):
                mag = float(flds[1])

            elif (counter == 5):
                stnm = flds[2]

            elif (counter == 6):
                stla = float(flds[2])

            elif (counter == 7):
                stlo = float(flds[2])

            elif (counter == 8):
                stel = float(flds[2])

            elif (counter == 9):
                record_time = flds[2] + ' ' + flds[3]
                # A 15 s delay is added to the record time by the
                # K-NET and KiK-net data loggers
                record_time = UTCDateTime.strptime(record_time,
                                                   '%Y/%m/%d %H:%M:%S') - 15.0
                # All times are in Japanese standard time which is 9 hours ahead of UTC
                record_time -= 9 * 3600.

            elif (counter == 10):
                freqstr = flds[2]
                m = re.search('[0-9]*', freqstr)
                freq = int(m.group())

            elif (counter == 11):
                duration = float(flds[2])

            elif (counter == 12):
                channel = flds[1].replace('-', '')
                kiknetcomps = {
                    '1': 'NS1',
                    '2': 'EW1',
                    '3': 'UD1',
                    '4': 'NS2',
                    '5': 'EW2',
                    '6': 'UD2'
                }
                # kiknet directions are 1-6
                if channel.strip() in kiknetcomps:
                    channel = kiknetcomps[channel.strip()]

            elif (counter == 13):
                eqn = flds[2]
                num, denom = eqn.split('/')
                num = float(re.search('[0-9]*', num).group())
                denom = float(denom)
                # convert the calibration from gal to m/s^2
                calib = 0.01 * num / denom

            elif (counter == 14):
                accmax = float(flds[3])

            elif (counter == 15):
                last_correction = flds[2] + ' ' + flds[3]
                last_correction = UTCDateTime.strptime(last_correction,
                                                       '%Y/%m/%d %H:%M:%S')
                # All times are in Japanese standard time which is 9 hours ahead of UTC
                last_correction -= 9 * 3600.

            elif counter > 16:
                data = str(line).split()
                for value in data:
                    a = float(value)
                    acc_data.append(a)
            counter = counter + 1

        data = np.array(acc_data)
        tr = Trace(data)
        tr.detrend("linear")
        tr.taper(max_percentage=0.05, type='cosine', side='both')
        filter_order = 4
        pad = np.zeros(int(round(1.5 * filter_order / fmin * freq)))
        tr.data = np.concatenate([pad, tr.data, pad])
        fN = freq / 2
        if fmax < fN:
            tr.data = filter.bandpass(tr.data,
                                      freqmin=fmin,
                                      freqmax=fmax,
                                      df=freq,
                                      corners=4,
                                      zerophase=True)
        else:
            tr.data = filter.highpass(tr.data,
                                      freq=fmin,
                                      df=freq,
                                      corners=4,
                                      zerophase=True)
        tr.data = tr.data[len(pad):len(tr.data) - len(pad)]
        tr.data = tr.data * calib / 9.81  #in g

        npts = len(tr.data)

        time = []
        for j in range(0, npts):
            t = j * 1 / freq
            time.append(t)
        time = np.asarray(time)
        if i == 1:
            inp_acc1 = tr.data
            npts1 = npts
            time1 = time
        if i == 2:
            inp_acc2 = tr.data
            npts2 = npts
            time2 = time
    return time1, time2, inp_acc1, inp_acc2, npts1, npts2
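
A minimal usage sketch for the KiK-net reader above; the record id, folder and
corner frequencies are placeholders, and the directory layout
<folder>/<recid>/<recid>.EW2 (and .NS2) is assumed to match what the function
expects.

# Hypothetical record id, folder and corner frequencies.
recid = 'ABCH011802180000'
path_kiknet_folder = '/path/to/kiknet'
t1, t2, acc1, acc2, n1, n2 = create_kiknet_acc(
    recid, path_kiknet_folder,
    fminNS2=0.1, fmaxNS2=20.0,
    fminEW2=0.1, fmaxEW2=20.0)
print(n1, n2)  # number of samples per horizontal component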
Example #36
0
 def getWaveform(*args, **kwargs):
     """
     Retrieves the waveforms and normalizes the graphs
     """
     # Check the two dates.
     try:
         st = UTCDateTime(NV.starttime.get())
     except:
         status_bar.configure(text='Please enter a valid start time.', foreground='red')
         status_bar.update_idletasks()
         return
     try:
         ed = UTCDateTime(NV.endtime.get())
     except:
         status_bar.configure(text='Please enter a valid end time.', foreground='red')
         status_bar.update_idletasks()
         return
     if ed - st <= 0:
          status_bar.configure(text='Start time needs to be earlier than end time.', foreground='red')
         status_bar.update_idletasks()
         return
     now = UTCDateTime()
     if now < st:
         status_bar.configure(text='You cannot plot the future...', foreground='red')
         status_bar.update_idletasks()
         return
     if ed - st > MAX_SPAN:
         status_bar.configure(text='Timeframe too large. Maximal %s seconds allowed.' % MAX_SPAN, foreground='red')
         status_bar.update_idletasks()
         return
     stream_list = []
     if len(NV.selected_list) == 0:
         NV.stream = None
         create_graph()
         return
     status_bar.configure(text='Retrieving data...', foreground='black')
     status_bar.update_idletasks()
     for channel in NV.selected_list:
         # Read the waveform
         start = UTCDateTime(NV.starttime.get())
         end = UTCDateTime(NV.endtime.get())
         splitted = channel.split('.')
         network = splitted[0]
         station = splitted[1]
         location = splitted[2]
         channel = splitted[3]
         try:
             st = SH.client.waveform.getWaveform(network, station, location,
                                             channel, start, end)
         except:
             trace = Trace(header={'network' : network,
                 'station' : station, 'location' : location,
                 'channel' : channel, 'starttime': start,
                 'endtime' : end, 'npts' : 0, 'sampling_rate' : 1.0})
             st = Stream(traces=[trace])
         st.merge()
         st.trim(start, end)
         stream_list.append(st)
     st = stream_list[0]
     for _i in xrange(1, len(stream_list)):
         st += stream_list[_i]
     # Merge the Stream and replace all masked values with NaNs.
     st.merge()
     st.sort()
     # Normalize all traces and throw out traces with no data.
     try:
         max_diff = max([trace.data.max() - trace.data.min() for trace in st \
                     if len(trace.data) > 0])
     except:
         pass
     for trace in st:
          if (np.ma.is_masked(trace.data) and not False in trace.data._mask) \
                  or len(trace.data) == 0:
             trace.data = np.array([])
         else:
             trace.data = trace.data - trace.data.mean()
             trace.data = trace.data / (max_diff / 2)
     NV.stream = st
     # Get the min. starttime and the max. endtime.
     starttime = UTCDateTime(NV.starttime.get())
     endtime = UTCDateTime(NV.endtime.get())
     for trace in NV.stream:
          if np.ma.is_masked(trace.data):
              trace.data = trace.data.filled(np.NaN)
     # Loop over all traces again and fill with NaNs.
     for trace in NV.stream:
         startgaps = int(round((trace.stats.starttime - starttime) * \
                             trace.stats.sampling_rate))
         endgaps = int(round((endtime - trace.stats.endtime) * \
                             trace.stats.sampling_rate))
         print endgaps
         if startgaps or endgaps:
             if startgaps > 0:
                 start = np.empty(startgaps)
                 start[:] = np.NaN
             else:
                 start = []
             if endgaps > 0:
                 end = np.empty(endgaps)
                 end[:] = np.NaN
             else:
                 end = []
             trace.data = np.concatenate([start, trace.data, end])
             trace.stats.npts = trace.data.size
             trace.stats.starttime = UTCDateTime(NV.starttime.get())
             #trace.stats.endtime = UTCDateTime(NV.endtime.get())
     status_bar.configure(text='')
     status_bar.update_idletasks()
     create_graph()
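
The gap-filling step above pads each trace with NaNs so that all traces span
the same plotting window. A standalone sketch of that padding for a single
obspy Trace, with an arbitrary window chosen purely for illustration:

import numpy as np
from obspy.core import Trace, UTCDateTime

# Hypothetical common window and a trace that starts late / ends early.
window_start = UTCDateTime(2011, 2, 21, 8, 0, 0)
window_end = UTCDateTime(2011, 2, 21, 8, 0, 20)
tr = Trace(np.ones(100))
tr.stats.sampling_rate = 10.0
tr.stats.starttime = window_start + 5

startgaps = int(round((tr.stats.starttime - window_start) *
                      tr.stats.sampling_rate))
endgaps = int(round((window_end - tr.stats.endtime) * tr.stats.sampling_rate))
pad_front = np.full(max(startgaps, 0), np.nan)
pad_back = np.full(max(endgaps, 0), np.nan)
tr.data = np.concatenate([pad_front, tr.data, pad_back])
tr.stats.starttime = window_start
print(tr.stats.npts)  # now spans the full window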
Example #37
0
def makeSynt(st, mti, mo, args):

    from obspy.core import Stream, Trace
    from math import cos, sin, radians
    import numpy as np
    import sys

    mxx = mti[0]
    myy = mti[1]
    mxy = mti[2]
    mxz = mti[3]
    myz = mti[4]
    mzz = mti[5]
    mo = 1.0 * mo
    MM = mo / (1e20)
    MM = 1.0

    # initialize
    synt = Stream()
    staz_list = []

    # npts
    npts = st[0].stats.npts
    delta = st[0].stats.delta

    # acquire station list
    for i in range(len(st)):
        if (st[i].stats.channel == 'tss'):
            staz_list.append(st[i].stats.station)

    # for each station of list: create 3 traces: ver,rad,tan
    # compute time elements for each one and update stats

    # make synt. The component loop over npts on syn.data is repeated 3 times
    # because of some pointer confusion (mine)
    k = 1
    for l in range(len(staz_list)):

        dati = np.arange(npts) * 0.0  #!!! initialize with floats
        az = radians(st[l * 10 + k].stats.az)

        ###############
        # TAN Component
        syn = Trace(dati)
        for i in range(npts):

            syn.data[i] =    mxx*0.5*st[l*10+k*0].data[i]*sin(2*az) \
                           - myy*0.5*st[l*10+k*0].data[i]*sin(2*az) \
                           - mxy*1.0*st[l*10+k*0].data[i]*cos(2*az) \
                           - mxz*1.0*st[l*10+k*1].data[i]*sin(1*az) \
                           + myz*1.0*st[l*10+k*1].data[i]*cos(1*az)

        # apply Mo
        syn.data = syn.data * MM * (-1)

        # update stats
        syn.stats.station = st[l * 10].stats.station
        syn.stats.channel = 'TAN'
        syn.stats.az = st[l * 10].stats.az
        syn.stats.baz = st[l * 10].stats.baz
        syn.stats.dist = st[l * 10].stats.dist
        syn.stats.gcarc = st[l * 10].stats.gcarc
        syn.stats.evla = st[l * 10].stats.evla
        syn.stats.evlo = st[l * 10].stats.evlo
        syn.stats.stlo = st[l * 10].stats.stlo
        syn.stats.stla = st[l * 10].stats.stla
        syn.stats.delta = delta

        # add to synt stream
        synt.append(syn)

        ###############
        # RAD Component
        syn = Trace(dati)
        for i in range(npts):

            syn.data[i] =    mxx*1/6*st[l*10+k*4].data[i]*(+1) \
                           - mxx*0.5*st[l*10+k*2].data[i]*cos(2*az) \
                           + mxx*1/3*st[l*10+k*8].data[i] \
                           + myy*1/6*st[l*10+k*4].data[i]*(+1) \
                           + myy*0.5*st[l*10+k*2].data[i]*cos(2*az) \
                           + myy*1/3*st[l*10+k*8].data[i] \
                           + mzz*1/3*st[l*10+k*8].data[i] \
                           - mzz*1/3*st[l*10+k*4].data[i]*(+1) \
                           - mxy*1.0*st[l*10+k*2].data[i]*sin(2*az) \
                           + mxz*1.0*st[l*10+k*3].data[i]*cos(1*az) \
                           + myz*1.0*st[l*10+k*3].data[i]*sin(1*az)

        # apply Mo
        syn.data = syn.data * MM * (-1)

        # update stats
        syn.stats.station = st[l * 10].stats.station
        syn.stats.channel = 'RAD'
        syn.stats.az = st[l * 10].stats.az
        syn.stats.baz = st[l * 10].stats.baz
        syn.stats.dist = st[l * 10].stats.dist
        syn.stats.gcarc = st[l * 10].stats.gcarc
        syn.stats.evla = st[l * 10].stats.evla
        syn.stats.evlo = st[l * 10].stats.evlo
        syn.stats.stlo = st[l * 10].stats.stlo
        syn.stats.stla = st[l * 10].stats.stla
        syn.stats.delta = delta

        # add to synt stream
        synt.append(syn)

        ###############
        # VER Component
        syn = Trace(dati)
        for i in range(npts):

            syn.data[i] =    mxx*1/6*st[l*10+k*7].data[i] \
                           - mxx*0.5*st[l*10+k*5].data[i]*(+1)*cos(2*az) \
                           + mxx*1/3*st[l*10+k*9].data[i] \
                           + myy*1/6*st[l*10+k*7].data[i] \
                           + myy*0.5*st[l*10+k*5].data[i]*(+1)*cos(2*az) \
                           + myy*1/3*st[l*10+k*9].data[i] \
                           + mzz*1/3*st[l*10+k*9].data[i] \
                           - mzz*1/3*st[l*10+k*7].data[i] \
                           - mxy*1.0*st[l*10+k*5].data[i]*(+1)*sin(2*az) \
                           + mxz*1.0*st[l*10+k*6].data[i]*(+1)*cos(1*az) \
                           + myz*1.0*st[l*10+k*6].data[i]*(+1)*sin(1*az)

        # apply Mo
        syn.data = syn.data * MM * (+1)

        # update stats
        syn.stats.station = st[l * 10].stats.station
        syn.stats.channel = 'VER'
        syn.stats.az = st[l * 10].stats.az
        syn.stats.baz = st[l * 10].stats.baz
        syn.stats.dist = st[l * 10].stats.dist
        syn.stats.gcarc = st[l * 10].stats.gcarc
        syn.stats.evla = st[l * 10].stats.evla
        syn.stats.evlo = st[l * 10].stats.evlo
        syn.stats.stlo = st[l * 10].stats.stlo
        syn.stats.stla = st[l * 10].stats.stla
        syn.stats.delta = delta

        # add to synt stream
        synt.append(syn)

    return synt
Example #38
0
def usarray_read(fname):
    """ Read the BAM US-Array lbv data format used on Mike-2 test specimen.

    Read the BAM US-Array lbv data format used on the Mike-2 test specimen into
    a stream object. As there is no obvious station (or any other) information
    in the data file and the parameters are not supposed to change, they are
    hardcoded here.

    :parameters:
    ------------
    :type fname: string
    :param fname: Path to the file containing the data to be read
        (WITHOUT EXTENSION); the extensions .lbv and .hdr will be added
        automatically
    :rtype: :class:`~obspy.core.Stream` object
    :return: **st**: obspy.core.Stream object
        Obspy stream object containing the data
    """

    # filenames
    lbvfilename = fname + '.lbv'
    hdrfilename = fname + '.hdr'

    # initialise
    st = Stream()
    tr = Trace()
    # tr = SacIO()

    # static parameters
    t = os.path.getmtime(hdrfilename)
    tt = datetime.datetime.fromtimestamp(t)

    tr.stats['starttime'] = UTCDateTime(tt.year, tt.month, tt.day, tt.hour,
                                        tt.minute, tt.second, tt.microsecond)
    tr.stats['network'] = 'BAM-USArray'
    tr.stats['channel'] = 'z'

    # reading header from file
    fh = open(hdrfilename, 'r')
    while True:
        line = fh.readline()
        if line.__len__() < 1:
            break
        line = line.rstrip()
        if line.find('PK') > -1:
            parts = re.split(':', line)
            tr.stats['location'] = parts[1].lstrip()
        if line.find('transceivers') > -1:
            parts = re.split(':', line)
            ntra = int(parts[1].lstrip())
            traco = np.zeros((ntra, 3), float)
            for i in range(ntra):
                coordstr = fh.readline().split()
                for j in range(3):
                    traco[i, j] = float(coordstr[j])
        if line.find('measurements') > -1:
            parts = re.split(':', line)
            nmeas = int(parts[1].lstrip())
            measco = np.zeros((nmeas, 2), int)
            for i in range(nmeas):
                configstr = fh.readline().split()
                for j in range(2):
                    measco[i, j] = float(configstr[j])
        if line.find('samples') > -1:
            parts = re.split(':', line)
            tr.stats['npts'] = int(parts[1].lstrip())
        if line.find('samplefreq') > -1:
            parts = re.split(':', line)
            tr.stats['sampling_rate'] = int(parts[1].lstrip())

    fh.close()

    # reading data from file
    fd = open(lbvfilename, 'rb')
    datatype = '>i2'
    read_data = np.fromfile(file=fd, dtype=datatype)
    fd.close()

    # sort and store traces
    for i in range(nmeas):
        # receiver number stored as station name
        tr.stats['station'] = str(measco[i, 1])
        # receiver coords (storing not yet implemented)
        stla = traco[measco[i, 1] - 1, 0]  # x
        stlo = traco[measco[i, 1] - 1, 1]  # y
        stel = traco[measco[i, 1] - 1, 2]  # z
        # transmitter number stored as event name (storing not yet implemented)
        kevnm = str(measco[i, 0])
        # transmitter coords (storing not yet implemented)
        evla = traco[measco[i, 0] - 1, 0]  # x
        evlo = traco[measco[i, 0] - 1, 1]  # y
        evdp = traco[measco[i, 0] - 1, 2]  # z
        tr.data = read_data[i * tr.stats.npts:(i + 1) * tr.stats.npts]
        # append a copy so every measurement gets its own Trace object
        st.extend([tr.copy()])
        # plot 1 trace for test purposes
        # if i==20:
        #    tr.plot()
        #    print ('plot done')

    return st
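
A hedged usage sketch for usarray_read; the base filename is a placeholder,
and both <base>.lbv and <base>.hdr must exist (the extensions are appended
inside the function).

# Hypothetical base name for the .lbv/.hdr file pair.
st = usarray_read('/data/mike2/usarray_shot001')
print(st)
st[0].plot()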
Example #39
0
def bin_baz_slow(stream1, stream2=None, nbaz=36 + 1, nslow=20 + 1, pws=False):
    """ 
    Function to stack receiver functions into back-azimuth and slowness bins.
    This can be done using a linear stack (i.e., simple
    mean), or using phase-weighted stacking.

    Parameters
    ----------
    stream1 : :class:`~obspy.core.Stream`
        Stream of equal-length seismograms to be stacked into
        a single trace.
    stream2 : :class:`~obspy.core.Stream`
        Optionally stack a second stream in the same operation.
    nbaz : int
        Number of back-azimuth bins
    nslow : int
        Number of slowness bins
    pws : bool
        Whether or not to perform phase-weighted stacking

    Returns
    -------
    final_stream : list of :class:`~obspy.core.Stream`
        List containing one or two Streams of binned, stacked traces,
        depending on the number of input streams

    """

    # Define back-azimuth and slowness bins
    baz_bins = np.linspace(0, 360, nbaz)
    slow_bins = np.linspace(0.04, 0.08, nslow)

    # Extract baz and slowness
    baz = [stream1[i].stats.baz for i in range(len(stream1))]
    slow = [stream1[i].stats.slow for i in range(len(stream1))]

    # Digitize baz and slowness
    ibaz = np.digitize(baz, baz_bins)
    islow = np.digitize(slow, slow_bins)

    final_stream = []

    for stream in [stream1, stream2]:
        try:
            # Define empty streams
            binned_stream = Stream()

            # Loop through baz_bins
            for i in range(nbaz):
                for j in range(nslow):

                    nbin = 0
                    array = np.zeros(len(stream[0].data))
                    weight = np.zeros(len(stream[0].data), dtype=complex)

                    # Loop all traces
                    for k, tr in enumerate(stream):

                        # If index of baz_bins is equal to ibaz
                        if i == ibaz[k] and j == islow[k]:

                            nbin += 1
                            array += tr.data
                            hilb = hilbert(tr.data)
                            phase = np.arctan2(hilb.imag, hilb.real)
                            weight += np.exp(1j * phase)

                            continue

                    if nbin > 0:

                        # Average and update stats
                        array /= nbin
                        weight = np.real(abs(weight / nbin))

                        trace = Trace(header=stream[0].stats)
                        trace.stats.baz = baz_bins[i]
                        trace.stats.slow = slow_bins[j]
                        trace.stats.nbin = nbin

                        if not pws:
                            weight = np.ones(len(stream[0].data))
                        trace.data = weight * array
                        binned_stream.append(trace)

            final_stream.append(binned_stream)

        except:
            continue

    return final_stream
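
A minimal usage sketch for the binning function above. The synthetic stream
built here is an assumption: it only mimics the baz and slow stats attributes
the function reads, so that the call is self-contained.

import numpy as np
from obspy.core import Stream, Trace

# Build a small synthetic Stream with tr.stats.baz / tr.stats.slow set.
rfstream = Stream()
for baz, slow in [(10., 0.05), (12., 0.05), (200., 0.07)]:
    tr = Trace(np.random.randn(100))
    tr.stats.baz = baz
    tr.stats.slow = slow
    rfstream.append(tr)

stacks = bin_baz_slow(rfstream, nbaz=37, nslow=21, pws=True)
for tr in stacks[0]:  # one Stream per input stream; traces are populated bins
    print(tr.stats.baz, tr.stats.slow, tr.stats.nbin)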
Example #40
0
def add_corr(params,
             station1,
             station2,
             filterid,
             date,
             time,
             duration,
             components,
             CF,
             sampling_rate,
             name='corr',
             day=False,
             ncorr=0):
    """
    Adds a CCF to the data archive on disk.
    
    :type params: dict
    :param params: This dictionary contains all parameters for correlation.
        See params.py to initialize it.
    :type station1: str
    :param station1: The name of station 1 (formatted NET.STA)
    :type station2: str
    :param station2: The name of station 2 (formatted NET.STA)
    :type filterid: int
    :param filterid: The ID (ref) of the filter
    :type date: datetime.date or str
    :param date: The date of the CCF
    :type time: datetime.time or str
    :param time: The time of the CCF
    :type duration: float
    :param duration: The total duration of the exported CCF
    :type components: str
    :param components: The name of the components used (ZZ, ZR, ...)
    :type sampling_rate: float
    :param sampling_rate: The sampling rate of the exported CCF
    :type day: bool
    :param day: Whether this function is called to export a daily stack (True)
        or each CCF (when keep_all parameter is set to True in the
        configuration). Defaults to False.
    :type ncorr: int
    :param ncorr: Number of CCF that have been stacked for this CCF.
    """

    output_folder = params['output_folder']
    export_format = params['export_format']
    sac, mseed = False, False
    if export_format in ["BOTH", "both"]:
        mseed = True
        sac = True
    elif export_format in ["SAC", "sac"]:
        sac = True
    elif export_format in ["MSEED", "mseed"]:
        mseed = True
    if params['crosscorr']: pass

    if day:
        path = os.path.join("STACKS", '%s' % name, "%02i" % filterid,
                            "001_DAYS", components,
                            "%s_%s" % (station1, station2), str(date))
        pair = "%s:%s" % (station1, station2)
        if mseed:
            export_mseed(params, path, pair, components, filterid, CF / ncorr,
                         ncorr)
        if sac:
            export_sac(params, path, pair, components, filterid, CF / ncorr,
                       ncorr)

    else:
        file = '%s.cc' % time
        path = os.path.join(output_folder, "%02i" % filterid, station1,
                            station2, components, date)
        if not os.path.isdir(path):
            os.makedirs(path)

        t = Trace()
        t.data = CF
        t.stats.sampling_rate = sampling_rate
        t.stats.starttime = -float(params['maxlag'])
        t.stats.components = components
        # if ncorr != 0:
        # t.stats.location = "%02i"%ncorr
        st = Stream(traces=[
            t,
        ])
        st.write(os.path.join(path, file), format='mseed')
        del t, st
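
A hedged usage sketch for add_corr. The params dictionary below only carries
the keys this code path actually touches (output_folder, export_format,
crosscorr, maxlag); the station names, filter id, dates and the CCF array are
placeholders.

import numpy as np

# Minimal, hypothetical parameter set covering the keys used above.
params = {'output_folder': 'CROSS_CORRELATIONS',
          'export_format': 'MSEED',
          'crosscorr': True,
          'maxlag': 120.0}
ccf = np.random.randn(2 * int(120.0 * 20.0) + 1)  # fake CCF sampled at 20 Hz

add_corr(params, 'BE.STA1', 'BE.STA2', filterid=1,
         date='2012-01-01', time='2012-01-01_00-00-00',
         duration=3600., components='ZZ', CF=ccf,
         sampling_rate=20.0, day=False, ncorr=1)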
Example #41
0
def bin(stream1, stream2=None, typ='baz', nbin=36 + 1, pws=False):
    """ 
    Function to stack receiver functions into (baz or slow) bins
    This can be done using a linear stack (i.e., simple
    mean), or using phase-weighted stacking.

    Parameters
    ----------
    stream1 : :class:`~obspy.core.Stream`
        Stream of equal-length seismograms to be stacked into
        a single trace.
    stream2 : :class:`~obspy.core.Stream`
        Optionally stack a second stream in the same operation.
    typ : str
        Attribute to bin by: 'baz', 'slow' or 'dist'
    nbin : int
        Number of bins
    pws : bool
        Whether or not to perform phase-weighted stacking

    Returns
    -------
    final_stream : list of :class:`~obspy.core.Stream`
        List containing one or two Streams of binned, stacked traces,
        depending on the number of input streams

    """

    if typ not in ['baz', 'slow', 'dist']:
        raise Exception("Type has to be 'baz', 'slow' or 'dist'")

    if typ == 'baz':
        bmin = 0
        bmax = 360
        stat = [stream1[i].stats.baz for i in range(len(stream1))]
    elif typ == 'slow':
        stat = [stream1[i].stats.slow for i in range(len(stream1))]
        bmin = np.min(np.array(stat))
        bmax = np.max(np.array(stat))
    elif typ == 'dist':
        stat = [stream1[i].stats.gac for i in range(len(stream1))]
        bmin = np.min(np.array(stat))
        bmax = np.max(np.array(stat))

    # Define bins
    bins = np.linspace(bmin, bmax, nbin)

    # Digitize stat
    ind = np.digitize(stat, bins)

    final_stream = []

    for stream in [stream1, stream2]:
        try:
            # Define empty streams
            binned_stream = Stream()

            # Loop through bins
            for i in range(nbin):

                nb = 0
                array = np.zeros(len(stream[0].data))
                weight = np.zeros(len(stream[0].data), dtype=complex)

                # Loop through stat
                for j, tr in enumerate(stream):

                    # If index of bins is equal to ind
                    if i == ind[j]:

                        nb += 1
                        array += tr.data
                        hilb = hilbert(tr.data)
                        phase = np.arctan2(hilb.imag, hilb.real)
                        weight += np.exp(1j * phase)

                        continue

                if nb > 0:

                    # Average and update stats
                    array /= nb
                    weight = np.real(abs(weight / float(nb)))

                    trace = Trace(header=stream[0].stats)
                    trace.stats.nbin = nb
                    if typ == 'baz':
                        trace.stats.baz = bins[i]
                        trace.stats.slow = None
                        trace.stats.nbin = nb
                    elif typ == 'slow':
                        trace.stats.slow = bins[i]
                        trace.stats.baz = None
                        trace.stats.nbin = nb
                    elif typ == 'dist':
                        trace.stats.dist = bins[i]
                        trace.stats.slow = None
                        trace.stats.baz = None
                        trace.stats.nbin = nb
                    if not pws:
                        weight = np.ones(len(stream[0].data))
                    trace.data = weight * array
                    binned_stream.append(trace)

            final_stream.append(binned_stream)

        except:
            continue

    return final_stream
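
And a similar hedged usage sketch for bin(), stacking by back-azimuth only;
the synthetic stream is an assumption that just provides the baz stats
attribute the function reads.

import numpy as np
from obspy.core import Stream, Trace

# Hypothetical receiver-function stream with a baz attribute on each trace.
rfstream = Stream()
for baz in (10., 12., 200.):
    tr = Trace(np.random.randn(100))
    tr.stats.baz = baz
    rfstream.append(tr)

stacks = bin(rfstream, typ='baz', nbin=37, pws=False)
for tr in stacks[0]:
    print(tr.stats.baz, tr.stats.nbin)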
Example #42
0
import numpy as np
from obspy.core import Trace, UTCDateTime

x = np.zeros(200)
x[100] = 1
tr = Trace(x)
tr.stats.network = "XX"
tr.stats.station = "SDFD1"
print tr
print "showing original trace"
tr.plot()
tr.stats.sampling_rate = 20
tr.stats.starttime = UTCDateTime(2011, 2, 21, 8)
print tr
tr.plot()

tr.filter("lowpass", freq=1)
print "showing filtered trace"
tr.plot()
tr.trim(tr.stats.starttime + 4.5, tr.stats.endtime - 2)
print "showing trimmed trace"
tr.plot()

tr.data = tr.data * 500
tr2 = tr.copy()
tr2.data = tr2.data + np.random.randn(len(tr2))
tr2.stats.station = "SDFD2"
print tr2
print "showing trace with gaussian noise added"
tr2.plot()
Example #43
0
        except Exception as e:
            print e

    print "STACK"
    corrc3 = stack(datac3, stack_method=p['stack_method'])

    try:
        os.makedirs('c3/%s/%s.%s/' % (staTarget1, comp1, comp2))

#     os.makedirs('c1/%s/'%staTarget1)
    except:
        pass
    t = Trace()
    t.stats.station = 'c3'
    t.stats.sampling_rate = p['df']
    t.data = np.array(corrc3[::-1])
    t.stats.starttime -= (len(corrc3) / 2) / p['df']
    t.write('c3/%s/%s.%s/BO.c3.%s.%s.%s.mseed'%(staTarget1,\
        comp1, comp2, namepairA_B, comp1, comp2), format='MSEED')
    # t2 = Trace()
    # t2.stats.station = 'c1'
    # t2.stats.sampling_rate = df
    # t2.data= np.array(aa)
    # t2.stats.starttime -= ( len(aa) / 2 ) / df
    # t2.write('c1/%s/c1.%s.mseed'%(staTarget1, pair), format='MSEED')

if __name__ == '__main__':
    #staTarget1 = '235713'
    #staTarget2 = '236977'
    #depth = 0
    t = time.time()
Example #44
0
def main():
    db = connect()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute SARA_RATIO ***')

    while is_next_job(db, jobtype='SARA_RATIO'):
        t0 = time.time()
        jobs = get_next_job(db, jobtype='SARA_RATIO')
        stations = []
        pairs = []
        refs = []

        for job in jobs:
            refs.append(job.ref)
            pairs.append(job.pair)
            netsta1, netsta2 = job.pair.split(':')
            stations.append(netsta1)
            stations.append(netsta2)
            goal_day = job.day

        stations = np.unique(stations)

        logging.info("New SARA Job: %s (%i pairs with %i stations)" %
                     (goal_day, len(pairs), len(stations)))

        logging.debug(
            "Preloading all envelopes and applying site and sensitivity")
        all = {}
        for station in stations:
            tmp = get_sara_param(db, station)
            sensitivity = tmp.sensitivity
            site_effect = tmp.site_effect
            try:
                tmp = read(
                    os.path.join("SARA", "ENV", station,
                                 "%s.MSEED" % goal_day))
            except:
                logging.debug("Error reading %s:%s" % (station, goal_day))
                continue
            for trace in tmp:
                trace.data /= (sensitivity * site_effect)
            all[station] = tmp

        logging.debug("Computing all pairs")
        for job in jobs:
            netsta1, netsta2 = job.pair.split(':')
            net1, sta1 = netsta1.split(".")
            net2, sta2 = netsta2.split(".")
            trace = Trace()
            if netsta1 not in all or netsta2 not in all:
                update_job(db,
                           job.day,
                           job.pair,
                           'SARA_RATIO',
                           'D',
                           ref=job.ref)
                continue
            tmp = Stream()
            for tr in all[netsta1]:
                tmp += tr
            for tr in all[netsta2]:
                tmp += tr
            # tmp = Stream(traces=[all[netsta1], all[netsta2]])
            # print(tmp)
            tmp.merge()
            tmp = make_same_length(tmp)
            tmp.merge(fill_value=np.nan)
            if len(tmp) > 1:
                trace.data = tmp.select(network=net1, station=sta1)[0].data / \
                             tmp.select(network=net2, station=sta2)[0].data
                trace.stats.starttime = tmp[0].stats.starttime
                trace.stats.delta = tmp[0].stats.delta

                env_output_dir = os.path.join('SARA', 'RATIO',
                                              job.pair.replace(":", "_"))
                if not os.path.isdir(env_output_dir):
                    os.makedirs(env_output_dir)
                trace.write(os.path.join(env_output_dir, goal_day + '.MSEED'),
                            format="MSEED",
                            encoding="FLOAT32")

            update_job(db, job.day, job.pair, 'SARA_RATIO', 'D', ref=job.ref)
            del tmp
        logging.info("Done. It took %.2f seconds" % (time.time() - t0))