Example 1
from numpy import zeros
from obspy.signal.cross_correlation import xcorr, xcorr_max


def XCM2(events):
    n_events = len(events)

    xcorr_vals_pos = zeros((n_events, n_events))
    xcorr_lags_pos = zeros((n_events, n_events))
    xcorr_vals_neg = zeros((n_events, n_events))
    xcorr_lags_neg = zeros((n_events, n_events))

    for i in range(n_events):
        print("{} of {}".format(i, n_events))
        for j in range(i, n_events):
            xcorrij = xcorr(events[i], events[j], 250, full_xcorr=True)
            # Store the absolute maximum XC, which may be negative
            xcorr_lags_neg[i, j] = xcorrij[0]
            xcorr_vals_neg[i, j] = xcorrij[1]
            if xcorrij[1] < 0.:
                # Absolute max is negative: also store the best positive XC
                xcorr_lags_pos[i, j], xcorr_vals_pos[i, j] = xcorr_max(
                    xcorrij[2], abs_max=False)
            else:
                xcorr_lags_pos[i, j] = xcorrij[0]
                xcorr_vals_pos[i, j] = xcorrij[1]

    return xcorr_vals_pos, xcorr_lags_pos, xcorr_vals_neg, xcorr_lags_neg
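A minimal usage sketch for the function above; it assumes an ObsPy version that still ships the deprecated xcorr/xcorr_max routines, and hypothetical, pre-trimmed event waveforms of equal length:

from obspy import read
import numpy as np

events = [read(f)[0] for f in ('ev1.mseed', 'ev2.mseed', 'ev3.mseed')]
vals_pos, lags_pos, vals_neg, lags_neg = XCM2(events)
# Only the upper triangle (j >= i) is filled; mirror it if a full
# symmetric matrix is needed:
vals_full = vals_pos + np.triu(vals_pos, 1).T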
Example 2
def xcorr_align_stream(st, config):

    shift_len = config.cc_shift_length
    shifts = []
    for i, tr in enumerate(st):
        a, b, c = xcorr(st[0], tr, shift_len, full_xcorr=True)
        if b < 0:
            a = c.argmax() - shift_len
        shifts.append(a / tr.stats.sampling_rate)

    group_streams = Stream()
    T1 = st[0].stats.starttime
    T2 = st[0].stats.endtime
    for i, tr in enumerate(st):
        tr = tr.copy().trim(tr.stats.starttime - shifts[i],
                            tr.stats.endtime - shifts[i],
                            pad=True,
                            fill_value=0)
        tr.trim(tr.stats.starttime + 1,
                tr.stats.endtime - 1,
                pad=True,
                fill_value=0)
        tr.stats.starttime = T1
        group_streams += tr

    # Stack the aligned traces collected above
    ST = group_streams[0].copy()
    for tr in group_streams[1:]:
        ST.data = ST.data + tr.data
    ST.data = (ST.data / len(group_streams)) * config.digouti
    ST.trim(T1, T2)
    return ST
Example 3
def align_traces(trace_list, shift_len):
    """
    Function to align traces relative to each other based on their
    cross-correlation value

    :type trace_list: list of Traces
    :param trace_list: List of traces to align
    :type shift_len: int
    :param shift_len: Length to allow shifting within, in samples

    :returns: list of shifts for best alignment in seconds
    """
    from obspy.signal.cross_correlation import xcorr
    from copy import deepcopy
    import numpy as np
    traces = deepcopy(trace_list)
    # Use trace with largest MAD amplitude as master
    master = traces[0]
    MAD_master = np.median(np.abs(master.data))
    master_no = 0
    for i in range(1, len(traces)):
        if np.median(np.abs(traces[i].data)) > MAD_master:
            master = traces[i]
            MAD_master = np.median(np.abs(master.data))
            master_no = i
    shifts = []
    for i in range(len(traces)):
        if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
            raise ValueError('Sampling rates not the same')
        shift, cc = xcorr(master, traces[i], shift_len)
        shifts.append(shift / master.stats.sampling_rate)
    return shifts
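Usage might look like the following; a short sketch assuming traces that share a sampling rate and an ObsPy version that still provides xcorr (the input file is hypothetical):

from obspy import read

trace_list = list(read('events.mseed'))
shifts = align_traces(trace_list, shift_len=100)  # search +/- 100 samples
for tr, shift in zip(trace_list, shifts):
    tr.stats.starttime += shift  # one way to apply the shifts (in seconds)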
Example 4
def calc_triggers(st, config, intsd):
    lags = np.array([])
    lags_inds1 = np.array([])
    lags_inds2 = np.array([])
    #### cross correlate all station pairs ####
    for ii in range(len(st) - 1):
        for jj in range(ii + 1, len(st)):
            index, value, cc_vector = xcorr(st[ii],
                                            st[jj],
                                            shift_len=config.cc_shift_length,
                                            full_xcorr=True)
            #### if best xcorr value is negative, find the best positive one ####
            if value < 0:
                index = cc_vector.argmax() - config.cc_shift_length
                value = cc_vector.max()
            dt = index / st[0].stats.sampling_rate
            #### check that the best lag is at least the vmin value
            #### and check for minimum cross correlation value
            all_vmin = np.array([v['vmin'] for v in config.VOLCANO]).min()
            if np.abs(dt) < intsd[ii, jj] / all_vmin and value > config.min_cc:
                lags = np.append(lags, dt)
                lags_inds1 = np.append(lags_inds1, ii)
                lags_inds2 = np.append(lags_inds2, jj)

    #### return lag times, and
    return lags, lags_inds1, lags_inds2
Example 5
def cross_chan_coherence(st1,
                         st2,
                         allow_shift=False,
                         shift_len=0.2,
                         i=0,
                         xcorr_func='time_domain'):
    """
    Calculate cross-channel coherency.

    Determine the cross-channel coherency between two streams of multichannel
    seismic data.

    :type st1: obspy.core.stream.Stream
    :param st1: Stream one
    :type st2: obspy.core.stream.Stream
    :param st2: Stream two
    :type allow_shift: bool
    :param allow_shift:
        Whether to allow the optimum alignment to be found for coherence,
        defaults to `False` for strict coherence
    :type shift_len: int
    :param shift_len: Samples to shift, only used if `allow_shift=True`
    :type i: int
    :param i: index used for parallel async processing, returned unaltered
    :type xcorr_func: str, callable
    :param xcorr_func:
        The method for performing correlations. Accepts either a string or
        callable. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
        for more details

    :returns:
        cross channel coherence, float - normalized by number of channels,
        and i, where i is int, as input.
    :rtype: tuple
    """
    cccoh = 0.0
    kchan = 0
    array_xcorr = get_array_xcorr(xcorr_func)
    for tr in st1:
        tr2 = st2.select(station=tr.stats.station, channel=tr.stats.channel)
        if len(tr2) > 0 and tr.stats.sampling_rate != \
                tr2[0].stats.sampling_rate:
            warnings.warn('Sampling rates do not match, not using: %s.%s' %
                          (tr.stats.station, tr.stats.channel))
            continue
        if len(tr2) > 0 and allow_shift:
            index, corval = xcorr(tr, tr2[0],
                                  int(shift_len * tr.stats.sampling_rate))
            cccoh += corval
            kchan += 1
        elif len(tr2) > 0:
            min_len = min(len(tr.data), len(tr2[0].data))
            cccoh += array_xcorr(np.array([tr.data[0:min_len]]),
                                 tr2[0].data[0:min_len], [0])[0][0][0]
            kchan += 1
    if kchan:
        cccoh /= kchan
        return np.round(cccoh, 6), i
    else:
        warnings.warn('No matching channels')
        return 0, i
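A sketch of calling this version of cross_chan_coherence; note that here shift_len is given in seconds (it is multiplied by the sampling rate internally). The file names are hypothetical:

from obspy import read

st1 = read('template.mseed')
st2 = read('detection.mseed')
coherence, i = cross_chan_coherence(st1, st2, allow_shift=True,
                                    shift_len=0.2)
print('Mean cross-channel coherence: {0:.3f}'.format(coherence))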
Example 6
def plot_synth_real(real_template, synthetic, channels=False):
    r"""Plot multiple channels of data for real data and synthetic.

    :type real_template: obspy.Stream
    :param real_template: Stream of the real template
    :type synthetic: obspy.Stream
    :param synthetic: Stream of synthetic template
    :type channels: list of tuple
    :param channels: List of tuples of (station, channel) to plot, default is\
            False, which plots all.
    """
    from obspy.signal.cross_correlation import xcorr
    from obspy import Stream
    colours = ['k', 'r']
    labels = ['Real', 'Synthetic']
    if channels:
        real = []
        synth = []
        for stachan in channels:
            real.append(real_template.select(station=stachan[0],
                                             channel=stachan[1]))
            synth.append(synthetic.select(station=stachan[0],
                                          channel=stachan[1]))
        real_template = Stream(real)
        synthetic = Stream(synth)

    # Extract the station and channels
    stachans = list(set([(tr.stats.station, tr.stats.channel)
                         for tr in real_template]))
    fig, axes = plt.subplots(len(stachans), 1, sharex=True, figsize=(5, 10))
    axes = axes.ravel()
    for i, stachan in enumerate(stachans):
        real_tr = real_template.select(station=stachan[0],
                                       channel=stachan[1])[0]
        synth_tr = synthetic.select(station=stachan[0],
                                    channel=stachan[1])[0]
        shift, corr = xcorr(real_tr, synth_tr, 2)
        print('Shifting by: '+str(shift)+' samples')
        if corr < 0:
            synth_tr.data = synth_tr.data * -1
            corr = corr * -1
        if shift < 0:
            synth_tr.data = synth_tr.data[abs(shift):]
            real_tr.data = real_tr.data[0:len(synth_tr.data)]
        elif shift > 0:
            real_tr.data = real_tr.data[abs(shift):]
            synth_tr.data = synth_tr.data[0:len(real_tr.data)]
        for j, tr in enumerate([real_tr, synth_tr]):
            y = tr.data
            y = y / float(max(abs(y)))
            x = np.linspace(0, len(y) * tr.stats.delta, len(y))
            axes[i].plot(x, y, colours[j], linewidth=2.0, label=labels[j])
            axes[i].get_yaxis().set_ticks([])
        ylab = stachan[0]+'.'+stachan[1]+' cc='+str(round(corr, 2))
        axes[i].set_ylabel(ylab, rotation=0)
    plt.subplots_adjust(hspace=0)
    # axes[0].legend()
    axes[-1].set_xlabel('Time (s)')
    plt.show()
Example 7
import numpy
from obspy.signal.cross_correlation import xcorr


def xcorrelation(data, maxlag):
    st1 = data[1]
    st2 = data[2]
    tr1 = st1[0].data
    tr2 = st2[0].data
    tr1 = tr1 / numpy.linalg.norm(tr1)
    tr2 = tr2 / numpy.linalg.norm(tr2)
    return xcorr(tr1, tr2, maxlag, full_xcorr=True)[2]
Example 9
def plot_synth_real(real_template, synthetic, channels=False):
    r"""Plot multiple channels of data for real data and synthetic.

    :type real_template: obspy.Stream
    :param real_template: Stream of the real template
    :type synthetic: obspy.Stream
    :param synthetic: Stream of synthetic template
    :type channels: list of tuple
    :param channels: List of tuples of (station, channel) to plot, default is\
            False, which plots all.
    """
    from obspy.signal.cross_correlation import xcorr
    from obspy import Stream
    colours = ['k', 'r']
    labels = ['Real', 'Synthetic']
    if channels:
        real = []
        synth = []
        for stachan in channels:
            real.append(
                real_template.select(station=stachan[0], channel=stachan[1]))
            synth.append(
                synthetic.select(station=stachan[0], channel=stachan[1]))
        real_template = Stream(real)
        synthetic = Stream(synth)

    # Extract the station and channels
    stachans = list(
        set([(tr.stats.station, tr.stats.channel) for tr in real_template]))
    fig, axes = plt.subplots(len(stachans), 1, sharex=True, figsize=(5, 10))
    axes = axes.ravel()
    for i, stachan in enumerate(stachans):
        real_tr = real_template.select(station=stachan[0],
                                       channel=stachan[1])[0]
        synth_tr = synthetic.select(station=stachan[0], channel=stachan[1])[0]
        shift, corr = xcorr(real_tr, synth_tr, 2)
        print('Shifting by: ' + str(shift) + ' samples')
        if corr < 0:
            synth_tr.data = synth_tr.data * -1
            corr = corr * -1
        if shift < 0:
            synth_tr.data = synth_tr.data[abs(shift):]
            real_tr.data = real_tr.data[0:len(synth_tr.data)]
        elif shift > 0:
            real_tr.data = real_tr.data[abs(shift):]
            synth_tr.data = synth_tr.data[0:len(real_tr.data)]
        for j, tr in enumerate([real_tr, synth_tr]):
            y = tr.data
            y = y / float(max(abs(y)))
            x = np.linspace(0, len(y) * tr.stats.delta, len(y))
            axes[i].plot(x, y, colours[j], linewidth=2.0, label=labels[j])
            axes[i].get_yaxis().set_ticks([])
        ylab = stachan[0] + '.' + stachan[1] + ' cc=' + str(round(corr, 2))
        axes[i].set_ylabel(ylab, rotation=0)
    plt.subplots_adjust(hspace=0)
    # axes[0].legend()
    axes[-1].set_xlabel('Time (s)')
    plt.show()
Example 10
def writestats(statfile, streamin, comp):
    # This function does the final stat computations for the accelerometer plots just produced
    # This was taken from the synthetics code so the accel plays the role of the synthetic
    debugwstats = True
    #try:
    if True:
        syncomp = "HN" + comp
        datacomp = "BH" + comp

        if debugwstats:
            print(streamin)
            print('Here is the comp:' + syncomp)
        syn = streamin.select(channel=syncomp)
        if debugwstats:
            print(syn)
        for tr in streamin.select(channel=datacomp):
            if debugwstats:
                print('Decimation factor: ' +
                      str(int(tr.stats.sampling_rate)) + ' ' +
                      str(syn[0].stats.sampling_rate))
            syn[0].decimate(
                int(syn[0].stats.sampling_rate / tr.stats.sampling_rate))
            if len(syn[0].data) != len(tr.data):
                syn[0].data = syn[0].data[:min(
                    [len(syn[0].data), len(tr.data)])]
                tr.data = tr.data[:min([len(syn[0].data), len(tr.data)])]
            if debugwstats:
                print('Here is the trace value:' +
                      str(numpy.sum(tr.data * syn[0].data)))
                print('Here is the accel value:' +
                      str(numpy.sum(numpy.square(syn[0].data))))
            # Here we compute a residual scale factor
            resi = "{0:.2f}".format(
                numpy.sum(tr.data * syn[0].data) /
                numpy.sum(numpy.square(syn[0].data)))
            if debugwstats:
                print('Here is the resi:' + str(resi))
            # Lets compute a cross-correlation and lag
            lag, corr = xcorr(tr, syn[0], 50)
            corr = "{0:.2f}".format(corr)
            if debugwstats:
                print('Here is the corr:' + str(corr))
                print('Here are the results:' + tr.stats.network + "," +
                      tr.stats.station)
                print(' Here are more:' + "," + tr.stats.location + "," +
                      tr.stats.channel + "," + str(resi))
                print('And more:' + "," + str(lag) + "," + str(corr) + "\n")

            # Now we want to write to a file
            statfile.write(tr.stats.network + "," + tr.stats.station)
            statfile.write("," + tr.stats.location + "," + tr.stats.channel +
                           "," + str(resi))
            statfile.write("," + str(lag) + "," + str(corr) + "\n")

    #except:
    #    if debug:
    #        print 'No residual for' + cursta + ' ' + 'LH' + comp
    return
Example 11
    def apply(self, data):
        L = []
        # all possible pairs of channels
        indices = list(itertools.combinations(range(data.shape[0]), 2))
        for idx, pair in enumerate(indices):
            i = pair[0]
            j = pair[1]
            _, _, xc = xcorr(data[i].T, data[j].T, 100, full_xcorr=True)
            xc = xc.reshape((1, xc.shape[0]))
            L.append(xc)
        data1 = np.concatenate(L, axis=0)

        return data1
Example 12
    def xcorr(self, itrace0=None, itrace1=None, shift_len=1001, 
            include_auto=False):
        """
        Cross correlate traces
        """
        if itrace0 is None:
            itrace0 = range(len(self.traces))

        if itrace1 is None:
            itrace1 = range(len(self.traces))

        st = Stream()

        if include_auto:
            k = 0
        else:
            k = 1

        i0, i1 = np.triu_indices(len(itrace0), k=k, m=len(itrace1))

        logging.info("Cross correlating {:} trace pairs...", len(self.traces))
        for _itr0, _itr1 in zip(i0, i1):
            itr0 = itrace0[_itr0]
            itr1 = itrace1[_itr1]

            tr0 = self.traces[itr0]
            tr1 = self.traces[itr1]

            assert tr0.stats['sampling_rate'] == \
                tr1.stats['sampling_rate'], \
                'Sampling rates for traces {} ({}) and {} ({}) are not equal.'.format(
                    itr0, tr0.id, itr1, tr1.id)

            logging.info('... %s with %s...', tr0.id, tr1.id)
            
            i, c, _xc = xcorr(self.traces[itr1], self.traces[itr0],
                    shift_len, full_xcorr=True)

            xc = Trace(data=_xc, header=tr0.stats)
            xc.stats['npts'] = len(_xc)
            xc.stats['xcorr_imax'] = i
            xc.stats['xcorr_max'] = c

            for k in ['network', 'station', 'channel']:
                if tr0.stats[k] != tr1.stats[k]:
                    xc.stats[k] = '{:}-{:}'.format(tr0.stats[k],
                            tr1.stats[k])

            st.extend([xc])

        return st
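A sketch of how this method might be driven; the correlator object and its traces attribute are assumptions based on the excerpt:

cc_stream = correlator.xcorr(shift_len=1001, include_auto=False)
for tr in cc_stream:
    # each correlation Trace carries the peak lag and value in its stats
    print(tr.id, tr.stats['xcorr_imax'], tr.stats['xcorr_max'])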
Example 14
def est_SNR_1h(dstart, dend, ch1, ch2):
    # here you load all the functions you need to use
    from obspy.seg2.seg2 import readSEG2
    from obspy.core import Stream
    import numpy as np
    from obspy.signal.cross_correlation import xcorr
    from numpy import sign

    dataDir = "/import/three-data/hadzii/STEINACH/STEINACH_longtime/"
    SNR = []

    # loading the info for outfile-name
    stream_start = readSEG2(dataDir + str(dstart) + ".dat")
    t_start = stream_start[ch1].stats.seg2.ACQUISITION_TIME
    stream_end = readSEG2(dataDir + str(dend) + ".dat")
    t_end = stream_end[ch1].stats.seg2.ACQUISITION_TIME

    # running stack of the cross-correlations (2 * 25000 + 1 lags)
    corr = np.zeros(50001)
    for k in range(dstart, dend, 1):
        st1 = merge_single(ch1, dstart, k + 1)
        st2 = merge_single(ch2, dstart, k + 1)

        st1.detrend('linear')
        st2.detrend('linear')

        st1.filter('lowpass', freq=24, zerophase=True, corners=8)
        st1.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
        st1.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)
        st2.filter('lowpass', freq=24, zerophase=True, corners=8)
        st2.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
        st2.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)

        # 1-bit normalization
        tr1 = sign(st1[0].data)
        tr2 = sign(st2[0].data)

        # cross-correlation
        index, value, acorr = xcorr(tr1, tr2, 25000, full_xcorr=True)

        # check sanity
        if np.max(acorr) > 1:
            acorr = np.zeros(50001)
        corr += acorr

        SNR_ges = []
        for isnrb3 in range(40000, 49001, 500):  # steps of half a window length
            endwb3 = isnrb3 + 1000  # 1 s windows
            SNR_ges.append(np.max(np.abs(corr)) / np.std(corr[isnrb3:endwb3]))

        SNR.append(np.max(SNR_ges))

    return SNR
Example 15
def cross_chan_coherence(st1, st2, allow_shift=False, shift_len=0.2, i=0):
    """
    Calculate cross-channel coherency.

    Determine the cross-channel coherency between two streams of multichannel
    seismic data.

    :type st1: obspy.core.stream.Stream
    :param st1: Stream one
    :type st2: obspy.core.stream.Stream
    :param st2: Stream two
    :type allow_shift: bool
    :param allow_shift:
        Whether to allow the optimum alignment to be found for coherence,
        defaults to `False` for strict coherence
    :type shift_len: int
    :param shift_len: Samples to shift, only used if `allow_shift=True`
    :type i: int
    :param i: index used for parallel async processing, returned unaltered

    :returns:
        cross channel coherence, float - normalized by number of channels,
        and i, where i is int, as input.
    :rtype: tuple
    """
    cccoh = 0.0
    kchan = 0
    if allow_shift:
        for tr in st1:
            tr2 = st2.select(station=tr.stats.station,
                             channel=tr.stats.channel)
            if tr2:
                index, corval = xcorr(tr, tr2[0], shift_len)
                cccoh += corval
                kchan += 1
    else:
        for tr in st1:
            tr1 = tr.data
            # Assume you only have one waveform for each channel
            tr2 = st2.select(station=tr.stats.station,
                             channel=tr.stats.channel)
            if tr2:
                cccoh += normxcorr2(tr1, tr2[0].data)[0][0]
                kchan += 1
    if kchan:
        cccoh /= kchan
        return cccoh, i
    else:
        warnings.warn('No matching channels')
        return 0, i
Example 16
def cross_chan_coherence(st1, st2, allow_shift=False, shift_len=0.2, i=0):
    """
    Calculate cross-channel coherency.

    Determine the cross-channel coherency between two streams of \
    multichannel seismic data.

    :type st1: obspy.core.stream.Stream
    :param st1: Stream one
    :type st2: obspy.core.stream.Stream
    :param st2: Stream two
    :type allow_shift: bool
    :param allow_shift:
        Whether to allow the optimum alignment to be found for coherence,
        defaults to False for strict coherence
    :type shift_len: int
    :param shift_len: Samples to shift
    :type i: int
    :param i: index used for parallel async processing, returned unaltered

    :returns: tuple of (cccoh, i): cross channel coherence, float -\
        normalized by number of channels, and i, int, as input.
    """

    from eqcorrscan.core.match_filter import normxcorr2
    from obspy.signal.cross_correlation import xcorr
    cccoh = 0.0
    kchan = 0
    if allow_shift:
        for tr in st1:
            tr2 = st2.select(station=tr.stats.station,
                             channel=tr.stats.channel)
            if tr2:
                index, corval = xcorr(tr, tr2[0], shift_len)
                cccoh += corval
                kchan += 1
    else:
        for tr in st1:
            tr1 = tr.data
            # Assume you only have one waveform for each channel
            tr2 = st2.select(station=tr.stats.station,
                             channel=tr.stats.channel)
            if tr2:
                cccoh += normxcorr2(tr1, tr2[0].data)[0][0]
                kchan += 1
    if kchan:
        cccoh = cccoh / kchan
        return (cccoh, i)
    else:
        warnings.warn('No matching channels')
        return (0, i)
Example 17
def cross_correlation(data1, data2):
    """Cross correlate two different traces.

    Args:
        data1, data2 (array-like): traces to compare.

    Returns:
        float: time shift in samples
        float: cross-correlation value at that shift
        numpy.ndarray: the full cross-correlation function
    """
    time_shift, xcorr_value, cross_corr = xcorr(numpy.array(data1),
                                                numpy.array(data2),
                                                shift_len=10,
                                                full_xcorr=True)
    return float(time_shift), float(xcorr_value), cross_corr
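Unpacking the returned tuple; a small self-contained sketch (the sign convention, as the test examples further down show, is that a negative shift means the second trace lags the first):

import numpy as np

a = np.random.randn(1000)
b = np.roll(a, 3)  # b lags a by 3 samples
shift, value, cc = cross_correlation(a, b)
# shift should be about -3, value close to 1.0, and cc holds the
# 2 * shift_len + 1 = 21 correlation samples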
Example 18
def accp(st):
    samp_shift = 200
    X = np.linspace(-1 * samp_shift, samp_shift, samp_shift * 2 + 1)

    comp_list = []
    a_list = []
    b_list = []
    xcorr_list = []
    for tr_1 in st:
        station_1 = tr_1.stats.station

        for tr_2 in st:
            station_2 = tr_2.stats.station

            if (station_1 + station_2 in comp_list or
                    station_2 + station_1 in comp_list):
                continue

            # Avoid the autocorrelation
            if station_1 == station_2:
                continue

            comp_list.append(station_1 + station_2)

            print('--------------------------------------------------------')
            print(tr_1.stats.station, ' with ', tr_2.stats.station)

            a, b, xcorr_func = xcorr(tr_1, tr_2, samp_shift, full_xcorr=True)

            print('Index of max correlation value = ', a)
            print('Value of max correlation value = ', b)

            a_list.append(a)
            b_list.append(b)
            xcorr_list.append(xcorr_func)

    ax1 = plt.subplot(1, 1, 1)

    plt.ylabel('Correlation')
    plt.xlabel('Lag')
    ax1.grid()

    for i, comp_xcorr in enumerate(comp_list):

        ax1.plot(X, (xcorr_list[i]), label=comp_xcorr, lw=1.2)

    plt.legend()
    plt.show()
Example 19
def writestats(statfile, streamin, comp):
    #This function does the final stat computations for the accelerometer plots just produced
    #This was taken from the synthetics code so the accel plays the role of the synthetic
    debugwstats = False
    try:
        syncomp = "LN" + comp
        datacomp = "LH" + comp

        if debugwstats:
            print(streamin)
            print('Here is the comp:' + syncomp)
        syn = streamin.select(channel=syncomp)
        if debugwstats:
            print(syn)
        for tr in streamin.select(channel=datacomp):
            if debugwstats:
                print('Here is the trace value:' +
                      str(numpy.sum(tr.data * syn[0].data)))
                print('Here is the accel value:' +
                      str(numpy.sum(numpy.square(syn[0].data))))
            # Here we compute a residual scale factor
            resi = "{0:.2f}".format(
                numpy.sum(tr.data * syn[0].data) /
                numpy.sum(numpy.square(syn[0].data)))
            if debugwstats:
                print('Here is the resi:' + str(resi))
            # Lets compute a cross-correlation and lag
            lag, corr = xcorr(tr, syn[0], 50)
            corr = "{0:.2f}".format(corr)
            if debugwstats:
                print('Here is the corr:' + str(corr))
                print('Here are the results:' + tr.stats.network + "," +
                      tr.stats.station)
                print(' Here are more:' + "," + tr.stats.location + "," +
                      tr.stats.channel + "," + str(resi))
                print('And more:' + "," + str(lag) + "," + str(corr) + "\n")

            # Now we want to write to a file
            statfile.write(tr.stats.network + "," + tr.stats.station)
            statfile.write("," + tr.stats.location + "," + tr.stats.channel +
                           "," + str(resi))
            statfile.write("," + str(lag) + "," + str(corr) + "\n")

    except Exception:
        if debug:
            print('No residual for' + cursta + ' ' + 'LH' + comp)
    return
Example 20
def writestats(statfile, streamin, comp):
    # This function does the final stat computations for the accelerometer plots just produced
    # This was taken from the synthetics code so the accel plays the role of the synthetic
    debugwstats = True
    #try:
    if True:
        syncomp = "HN" + comp    
        datacomp = "BH" + comp
        
        if debugwstats:
            print(streamin)
            print('Here is the comp:' + syncomp)
        syn = streamin.select(channel=syncomp)
        if debugwstats:
            print(syn)
        for tr in streamin.select(channel=datacomp):
            if debugwstats:
                print('Decimation factor: ' + str(int(tr.stats.sampling_rate)) + ' ' + str(syn[0].stats.sampling_rate))
            syn[0].decimate(int(syn[0].stats.sampling_rate / tr.stats.sampling_rate))
            if len(syn[0].data) != len(tr.data):
                syn[0].data = syn[0].data[:min([len(syn[0].data), len(tr.data)])]
                tr.data = tr.data[:min([len(syn[0].data), len(tr.data)])]
            if debugwstats:
                print('Here is the trace value:' + str(numpy.sum(tr.data * syn[0].data)))
                print('Here is the accel value:' + str(numpy.sum(numpy.square(syn[0].data))))
            # Here we compute a residual scale factor
            resi = "{0:.2f}".format(numpy.sum(tr.data * syn[0].data) / numpy.sum(numpy.square(syn[0].data)))
            if debugwstats:
                print('Here is the resi:' + str(resi))
            # Lets compute a cross-correlation and lag
            lag, corr = xcorr(tr, syn[0], 50)
            corr = "{0:.2f}".format(corr)
            if debugwstats:
                print('Here is the corr:' + str(corr))
                print('Here are the results:' + tr.stats.network + "," + tr.stats.station)
                print(' Here are more:' + "," + tr.stats.location + "," + tr.stats.channel + "," + str(resi))
                print('And more:' + "," + str(lag) + "," + str(corr) + "\n")

            # Now we want to write to a file
            statfile.write(tr.stats.network + "," + tr.stats.station)
            statfile.write("," + tr.stats.location + "," + tr.stats.channel + "," +  str(resi))
            statfile.write("," + str(lag) + "," + str(corr) + "\n")
    
    #except:    
    #    if debug:
    #        print 'No residual for' + cursta + ' ' + 'LH' + comp    
    return
Example 21
def cleanNoise(ob, args):

    List = []
    Max_noise = float(args.noise)
    if Max_noise >= 0:

        for i in range(len(ob) // 3):

            # set temporary data vectors
            r = np.zeros(len(ob[i * 3 + 0]) // 3)
            t = np.zeros(len(ob[i * 3 + 1]) // 3)
            z = np.zeros(len(ob[i * 3 + 2]) // 3)

            # set noise (random uniform) for reference
            n = np.random.uniform(-1, 1, len(ob[i * 3 + 0]))
            w = len(n) // 4

            # Allocate data values
            r = ob[i * 3 + 0].data / max(ob[i * 3 + 0].data)
            t = ob[i * 3 + 1].data / max(ob[i * 3 + 1].data)
            z = ob[i * 3 + 2].data / max(ob[i * 3 + 2].data)

            # 3 comp reference values
            N_r = xcorr(n, r, w)
            N_t = xcorr(n, t, w)
            N_z = xcorr(n, z, w)
            # correlations between components
            R_t = xcorr(r, t, w)
            R_z = xcorr(r, z, w)
            T_z = xcorr(t, z, w)

            M_Rp = (R_t[0] + R_z[0] + T_z[0]) / 3.
            M_Np = (N_t[0] + N_z[0] + N_z[0]) / 3.
            M_Rv = (R_t[1] + R_z[1] + T_z[1]) / 3.
            M_Nv = (N_t[1] + N_z[1] + N_z[1]) / 3.

            if abs(M_Rv) < Max_noise:
                List.append(i)

        if len(List) > 0:

            listToRemove = ' '.join([
                '%d:%s' % (List[n], ob[List[n] * 3].stats.station)
                for n in range(len(List))
            ])
            print("\n\nStation to clean because of noise: ", listToRemove,
                  "\n\n")

            # Purge station from observed
            ob = purgeStream(ob, List)
            if len(ob) == 0:
                print("No data left. No solution found. Exit")
                sys.exit()

    return (ob, List)
Example 22
def writestats(statfile, streamin, comp):
    try:
        syncomp = "LX" + comp
        datacomp = "LH" + comp
        syn = streamin.select(channel=syncomp)
        for tr in streamin.select(channel=datacomp):
            resi = "{0:.2f}".format(numpy.sum(tr.data * syn[0].data) /
                                    numpy.sum(numpy.square(syn[0].data)))
            lag, corr = xcorr(tr, syn[0], 500)
            corr = "{0:.2f}".format(corr)
            statfile.write(tr.stats.network + "," + tr.stats.station)
            statfile.write("," + tr.stats.location + "," + tr.stats.channel +
                           "," + str(resi))
            statfile.write("," + str(lag) + "," + str(corr) + "\n")

    except Exception:
        if debug:
            print('No residual for' + cursta + ' ' + 'LH' + comp)
    return
Example 23
def template_auto_corr(files, write_shifts=False):
    if write_shifts:
        shift_file = open('/home/chet/data/template_pha_shift.txt', 'w')
    xcorrs = np.zeros((len(files), len(files)))
    file_cnt = 0
    #For each template, correlate with each other template and write value to xcorrs
    for j in range(len(files)):
        print('Running template ' + files[j])
        temp1 = read(files[j])
        temp1.resample(50)
        for i in range(len(files)):
            temp2 = read(files[i])
            temp2.resample(50)
            #print('correlating with '+files[i])
            #Make list of common sta.chans between both templates
            temp1_stachan = []
            temp2_stachan = []
            for tr1 in temp1:
                temp1_stachan.append(tr1.stats.station + '.' +
                                     tr1.stats.channel)
            for tr2 in temp2:
                temp2_stachan.append(tr2.stats.station + '.' +
                                     tr2.stats.channel)
            com_stachan = set(temp1_stachan).intersection(temp2_stachan)
            #Run the cross-correlation loop
            temp_xcorrs = []
            #shifts = []
            for stachan in com_stachan:
                #Use tr.select() to specify sta and chan from stachan list
                temp1_data = temp1.select(station=stachan[0:4],
                                          channel=stachan[5:])
                temp2_data = temp2.select(station=stachan[0:4],
                                          channel=stachan[5:])
                index, ccc = xcorr(temp1_data[0], temp2_data[0], 50)
                temp_xcorrs.append(ccc)
                if write_shifts:
                    #Write phase shifts to file for possible use at later date
                    shift_file.write('%s %s %s %s\n' %
                                     (files[j], stachan, ccc, index))
            #What sort of correlation are we doing? Stacked CCC? Mean CCC?
            xcorrs[j, i] = np.mean(temp_xcorrs)
            file_cnt += 1
    if write_shifts:
        shift_file.close()
    return xcorrs
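Driving template_auto_corr; a minimal sketch with a hypothetical glob pattern for the template files:

from glob import glob

files = sorted(glob('/home/chet/data/templates/*.ms'))
xcorrs = template_auto_corr(files, write_shifts=False)
# xcorrs[j, i] holds the mean per-channel correlation between templates j
# and i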
Example 24
def cleanNoise(ob, args):

    List = []
    Max_noise = float(args.noise)
    if Max_noise >= 0:

        for i in range(len(ob) // 3):

            # set temporary data vectors
            r = np.zeros(len(ob[i * 3 + 0]) // 3)
            t = np.zeros(len(ob[i * 3 + 1]) // 3)
            z = np.zeros(len(ob[i * 3 + 2]) // 3)

            # set noise (random uniform) for reference
            n = np.random.uniform(-1, 1, len(ob[i * 3 + 0]))
            w = len(n) // 4

            # Allocate data values
            r = ob[i * 3 + 0].data / max(ob[i * 3 + 0].data)
            t = ob[i * 3 + 1].data / max(ob[i * 3 + 1].data)
            z = ob[i * 3 + 2].data / max(ob[i * 3 + 2].data)

            # 3 comp reference values
            N_r = xcorr(n, r, w)
            N_t = xcorr(n, t, w)
            N_z = xcorr(n, z, w)
            # correlations between components
            R_t = xcorr(r, t, w)
            R_z = xcorr(r, z, w)
            T_z = xcorr(t, z, w)

            M_Rp = (R_t[0] + R_z[0] + T_z[0]) / 3.
            M_Np = (N_t[0] + N_z[0] + N_z[0]) / 3.
            M_Rv = (R_t[1] + R_z[1] + T_z[1]) / 3.
            M_Nv = (N_t[1] + N_z[1] + N_z[1]) / 3.

            if abs(M_Rv) < Max_noise:
                List.append(i)

        if len(List) > 0:

            listToRemove = ' '.join([
                '%d:%s' % (List[n], ob[List[n] * 3].stats.station)
                for n in range(len(List))
            ])
            print("\n\nStation to clean because of noise: ", listToRemove,
                  "\n\n")

            # Purge station from observed
            ob = purgeStream(ob, List)
            if len(ob) == 0:
                print("No data left. No solution found. Exit")
                sys.exit()
 
    return (ob, List)
Example 25
    def checkterms():
        ref = good['00'].select(component="R")[0].data
        angles = np.arange(-5., 5., 0.5)
        corrs = []
        for angle in angles:
            temp = good[loc].select(
                component="R")[0].data * np.cos(np.radians(angle))
            temp += -good[loc].select(
                component="T")[0].data * np.sin(np.radians(angle))
            lag, corr = xcorr(ref, temp, 0, full_xcorr=False)
            corrs.append(corr)
        angle = angles[np.argmax(corrs)]
        amps = {}
        for comp in ['Z', 'R', 'T']:
            amps[comp] = np.sum(
                (good[loc].select(component=comp)[0].data)**2)
            amps[comp] *= 1. / np.sum(
                (good['00'].select(component=comp)[0].data)**2)
        corr = max(corrs)
        return angle, corr, amps
Example 26
def writestats(statfile, streamin, comp):
    # This function does the final stat computations for the accelerometer plots just produced
    # This was taken from the synthetics code so the accel plays the role of the synthetic
    debugwstats = False
    try:
        syncomp = "LN" + comp    
        datacomp = "LH" + comp
        
        if debugwstats:
            print(streamin)
            print('Here is the comp:' + syncomp)
        syn = streamin.select(channel = syncomp)
        if debugwstats:
            print(syn)
        for tr in streamin.select(channel = datacomp):    
            if debugwstats:
                print('Here is the trace value:' + str(numpy.sum(tr.data*syn[0].data)))
                print('Here is the accel value:' + str(numpy.sum(numpy.square(syn[0].data))))
            # Here we compute a residual scale factor
            resi = "{0:.2f}".format(numpy.sum(tr.data*syn[0].data)/numpy.sum(numpy.square(syn[0].data)))
            if debugwstats:
                print('Here is the resi:' + str(resi))
            # Lets compute a cross-correlation and lag
            lag, corr = xcorr(tr,syn[0],50)
            corr = "{0:.2f}".format(corr)
            if debugwstats:
                print('Here is the corr:' + str(corr))
                print('Here are the results:' + tr.stats.network + "," + tr.stats.station)
                print(' Here are more:' + "," + tr.stats.location + "," + tr.stats.channel + "," +  str(resi))
                print('And more:' + "," + str(lag) + "," + str(corr) + "\n")

            # Now we want to write to a file
            statfile.write(tr.stats.network + "," + tr.stats.station)
            statfile.write("," + tr.stats.location + "," + tr.stats.channel + "," +  str(resi))
            statfile.write("," + str(lag) + "," + str(corr) + "\n")
    
    except Exception:
        if debug:
            print('No residual for' + cursta + ' ' + 'LH' + comp) 
    return
Example 27
def HVscheme2(st, f0, bazi, astime, aetime, disdeg, eve):

    st2 = finalfilter(st, f0, bazi, astime, aetime, True)
    corrs = []
    win = int(round(1. / f0, 0))
    for window in st2.slide(window_length=win, step=int(round(win / 16., 0))):
        HilbertV = np.imag(hilbert(window.select(component="Z")[0].data))
        lag, corr = xcorr(HilbertV,
                          window.select(component="R")[0].data,
                          5,
                          full_xcorr=False)
        #corr = pearsonr(HilbertV, window.select(component="R")[0].data)
        corrs.append(corr)
    corr = corrs
    HilbertV = np.imag(hilbert(st2.select(component="Z")[0].data))
    oldx = np.asarray(range(
        len(corr))) / (float(len(corr)) / float(len(HilbertV)))
    corr = np.interp(range(len(HilbertV)), oldx, corr)
    env = envelope(st2.select(component="R")[0].data) * envelope(HilbertV)
    HV = envelope(st2.select(component="R")[0].data) / envelope(HilbertV)
    env *= 1. / np.max(np.abs(env))
    #corr *= env

    t = np.asarray(range(len(HilbertV)))

    lim = t[(corr >= .90)]
    if len(lim) == 0:
        return 0., 0., 0.

    HV2 = HV[(corr >= .90)]
    lim = lim[(HV2 <= np.mean(HV2) + 3. * np.std(HV2))
              & (HV2 >= np.mean(HV2) - 3. * np.std(HV2))]
    HV2 = HV2[(HV2 <= np.mean(HV2) + 3. * np.std(HV2))
              & (HV2 >= np.mean(HV2) - 3. * np.std(HV2))]

    mHV = np.mean(HV2)
    stdHV = np.std(HV2)

    return mHV, stdHV, corr
Example 28
def align_traces(trace_list, shift_len, master=False):
    """
    Function to align traces relative to each other based on their
    cross-correlation value

    :type trace_list: list of Traces
    :param trace_list: List of traces to align
    :type shift_len: int
    :param shift_len: Length to allow shifting within, in samples
    :type master: obspy.Trace
    :param master: Master trace to align to, if set to False will align to\
        the largest amplitude trace (default)

    :returns: list of shifts for best alignment in seconds
    """
    from obspy.signal.cross_correlation import xcorr
    from copy import deepcopy
    import numpy as np
    traces = deepcopy(trace_list)
    if not master:
        # Use trace with largest MAD amplitude as master
        master = traces[0]
        MAD_master = np.median(np.abs(master.data))
        master_no = 0
        for i in range(1, len(traces)):
            if np.median(np.abs(traces[i].data)) > MAD_master:
                master = traces[i]
                MAD_master = np.median(np.abs(master.data))
                master_no = i
    else:
        print('Using master given by user')
    shifts = []
    ccs = []
    for i in range(len(traces)):
        if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
            raise ValueError('Sampling rates not the same')
        shift, cc = xcorr(master, traces[i], shift_len)
        shifts.append(shift / master.stats.sampling_rate)
        ccs.append(cc)
    return shifts, ccs
Example 29
def writestats(statfile, streamin, comp):
    """ 
    calculate the correlation coefficient and lag time for the synthetic
    when compared to the observed data and write to a file.
    """
    try:
        syncomp = "LX" + comp
        datacomp = "LH" + comp
        syn = streamin.select(channel=syncomp)
        for tr in streamin.select(channel=datacomp):
            resi = "{0:.2f}".format(np.sum(tr.data * syn[0].data) /
                                    np.sum(np.square(syn[0].data)))
            lag, corr = xcorr(tr, syn[0], 500)
            corr = "{0:.2f}".format(corr)
            statfile.write(tr.stats.network + "," + tr.stats.station)
            statfile.write("," + tr.stats.location + "," + tr.stats.channel +
                           "," + str(resi))
            statfile.write("," + str(lag) + "," + str(corr) + ", ")
            statfile.write(str(tr.stats.starttime.month) + "/" +
                           str(tr.stats.starttime.day) + "/" +
                           str(tr.stats.starttime.year) + " " +
                           str(tr.stats.starttime.hour) + ":" +
                           str(tr.stats.starttime.minute) + ":" +
                           str(tr.stats.starttime.second) + "\n")
    except Exception:
        if debug:
            print('No residual for' + tr.stats.station + ' ' + 'LH' + comp)
    return
Example 30
def align_traces(trace_list, shift_len, master=False):
    """
    Function to align traces relative to each other based on their \
    cross-correlation value.

    :type trace_list: list of Traces
    :param trace_list: List of traces to align
    :type shift_len: int
    :param shift_len: Length to allow shifting within, in samples
    :type master: obspy.Trace
    :param master: Master trace to align to, if set to False will align to \
        the largest amplitude trace (default)

    :returns: list of shifts for best alignment in seconds
    """
    from obspy.signal.cross_correlation import xcorr
    from copy import deepcopy
    import numpy as np
    traces = deepcopy(trace_list)
    if not master:
        # Use trace with largest MAD amplitude as master
        master = traces[0]
        MAD_master = np.median(np.abs(master.data))
        for i in range(1, len(traces)):
            if np.median(np.abs(traces[i].data)) > MAD_master:
                master = traces[i]
                MAD_master = np.median(np.abs(master.data))
    else:
        print('Using master given by user')
    shifts = []
    ccs = []
    for i in range(len(traces)):
        if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
            raise ValueError('Sampling rates not the same')
        shift, cc = xcorr(master, traces[i], shift_len)
        shifts.append(shift / master.stats.sampling_rate)
        ccs.append(cc)
    return shifts, ccs
Example 31
    def test_xcorr(self):
        """
        This tests the old, deprecated xcorr() function.
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=ObsPyDeprecationWarning)
            # example 1 - all samples are equal
            np.random.seed(815)  # make test reproducible
            tr1 = np.random.randn(10000).astype(np.float32)
            tr2 = tr1.copy()
            shift, corr = xcorr(tr1, tr2, 100)
            self.assertEqual(shift, 0)
            self.assertAlmostEqual(corr, 1, 2)
            # example 2 - all samples are different
            tr1 = np.ones(10000, dtype=np.float32)
            tr2 = np.zeros(10000, dtype=np.float32)
            shift, corr = xcorr(tr1, tr2, 100)
            self.assertEqual(shift, 0)
            self.assertAlmostEqual(corr, 0, 2)
            # example 3 - shift of 10 samples
            tr1 = np.random.randn(10000).astype(np.float32)
            tr2 = np.concatenate((np.zeros(10), tr1[0:-10]))
            shift, corr = xcorr(tr1, tr2, 100)
            self.assertEqual(shift, -10)
            self.assertAlmostEqual(corr, 1, 2)
            shift, corr = xcorr(tr2, tr1, 100)
            self.assertEqual(shift, 10)
            self.assertAlmostEqual(corr, 1, 2)
            # example 4 - shift of 10 samples + small sine disturbance
            tr1 = (np.random.randn(10000) * 100).astype(np.float32)
            var = np.sin(np.arange(10000, dtype=np.float32) * 0.1)
            tr2 = np.concatenate((np.zeros(10), tr1[0:-10])) * 0.9
            tr2 += var
            shift, corr = xcorr(tr1, tr2, 100)
            self.assertEqual(shift, -10)
            self.assertAlmostEqual(corr, 1, 2)
            shift, corr = xcorr(tr2, tr1, 100)
            self.assertEqual(shift, 10)
            self.assertAlmostEqual(corr, 1, 2)
Example 33
    def test_xcorr(self):
        """
        Test the xcorr() function.
        """
        # example 1 - all samples are equal
        np.random.seed(815)  # make test reproducible
        tr1 = np.random.randn(10000).astype(np.float32)
        tr2 = tr1.copy()
        shift, corr = xcorr(tr1, tr2, 100)
        self.assertEqual(shift, 0)
        self.assertAlmostEqual(corr, 1, 2)
        # example 2 - all samples are different
        tr1 = np.ones(10000, dtype=np.float32)
        tr2 = np.zeros(10000, dtype=np.float32)
        shift, corr = xcorr(tr1, tr2, 100)
        self.assertEqual(shift, 0)
        self.assertAlmostEqual(corr, 0, 2)
        # example 3 - shift of 10 samples
        tr1 = np.random.randn(10000).astype(np.float32)
        tr2 = np.concatenate((np.zeros(10), tr1[0:-10]))
        shift, corr = xcorr(tr1, tr2, 100)
        self.assertEqual(shift, -10)
        self.assertAlmostEqual(corr, 1, 2)
        shift, corr = xcorr(tr2, tr1, 100)
        self.assertEqual(shift, 10)
        self.assertAlmostEqual(corr, 1, 2)
        # example 4 - shift of 10 samples + small sine disturbance
        tr1 = (np.random.randn(10000) * 100).astype(np.float32)
        var = np.sin(np.arange(10000, dtype=np.float32) * 0.1)
        tr2 = np.concatenate((np.zeros(10), tr1[0:-10])) * 0.9
        tr2 += var
        shift, corr = xcorr(tr1, tr2, 100)
        self.assertEqual(shift, -10)
        self.assertAlmostEqual(corr, 1, 2)
        shift, corr = xcorr(tr2, tr1, 100)
        self.assertEqual(shift, 10)
        self.assertAlmostEqual(corr, 1, 2)
Example 35
def cc_core(ls_first, ls_second, identity_all, max_ts, print_sta):
    
    """
    Perform the main part of the cross correlation and creating 
    the cc.txt file
    """
    
    global input
    
    try:
        
        cc_open = open('./cc.txt', 'a')
        
        tr1 = read(ls_first)[0]
            
        if input['phase'] != 'N':
            evsta_dist = util.locations2degrees(lat1 = tr1.stats.sac.evla, \
                                    long1 = tr1.stats.sac.evlo, lat2 = tr1.stats.sac.stla, \
                                    long2 = tr1.stats.sac.stlo)
            
            taup_tt = taup.getTravelTimes(delta = evsta_dist, depth = tr1.stats.sac.evdp)
            
            phase_exist = 'N'
            
            for tt_item in taup_tt:
                if tt_item['phase_name'] == input['phase']:
                    print('Requested phase:')
                    print(input['phase'])
                    print('------')
                    print(tt_item['phase_name'])
                    print('exists in the waveform!')
                    print('-----------------------')
                    t_phase = tt_item['time']
                    
                    phase_exist = 'Y'
                    break
                    
        if input['phase'] == 'N' or (input['phase'] != 'N' and phase_exist == 'Y'):
            
            # identity of the current waveform
            identity = tr1.stats.network + '.' + tr1.stats.station + '.' + \
                        tr1.stats.location + '.' + tr1.stats.channel
            
            # Keep the current identity in a new variable
            id_name = identity
            
            try:
                tr2 = read(os.path.join(input['second_path'], identity))[0]
            except Exception as error:
                # if it is not possible to read the identity in the second path
                # then change the network part of the identity based on
                # correction unit
                identity = input['corr_unit'] + '.' + tr1.stats.station + '.' + \
                        tr1.stats.location + '.' + tr1.stats.channel
                tr2 = read(os.path.join(input['second_path'], identity))[0]
            
            if input['resample'] != 'N':
                print('WARNING: you are using resample!!!')
                tr1.resample(input['resample'])
                tr2.resample(input['resample'])
            
            if input['tw'] == 'Y':
                t_cut_1 = tr1.stats.starttime + t_phase - input['preset']
                t_cut_2 = tr1.stats.starttime + t_phase + input['offset']
                tr1.trim(starttime = t_cut_1, endtime = t_cut_2)
                
                t_cut_1 = tr2.stats.starttime + t_phase - input['preset']
                t_cut_2 = tr2.stats.starttime + t_phase + input['offset']
                tr2.trim(starttime = t_cut_1, endtime = t_cut_2)
            
            if input['hlfilter'] == 'Y':
                tr1.filter('lowpass', freq=input['hfreq'], corners=2)
                tr2.filter('lowpass', freq=input['hfreq'], corners=2)
                tr1.filter('highpass', freq=input['lfreq'], corners=2)
                tr2.filter('highpass', freq=input['lfreq'], corners=2)
            
            # normalization of all three waveforms to the 
            # max(max(tr1), max(tr2), max(tr3)) to keep the scales
            #maxi = max(abs(tr1.data).max(), abs(tr2.data).max(), abs(tr3.data).max())
            '''
            maxi = max(abs(tr1.data).max(), abs(tr2.data).max())
            tr1_data = tr1.data/abs(maxi)
            tr2_data = tr2.data/abs(maxi)
            tr3_data = tr3.data/abs(maxi)
            '''
            # normalise each trace by its peak absolute amplitude
            tr1.data = tr1.data / abs(tr1.data).max()
            tr2.data = tr2.data / abs(tr2.data).max()
        
            cc_np = tr1.stats.sampling_rate * max_ts
            np_shift, coeff = cross_correlation.xcorr(tr1, tr2, int(cc_np))
            t_shift = float(np_shift)/tr1.stats.sampling_rate
            
            # scale_str shows whether the scale of the waveforms are the same or not
            # if scale_str = 'Y' then the scale is correct.
            scale_str = 'Y'
            
            if abs(tr1.data).max() > 2.0 * abs(tr2.data).max():
                label_tr1 = ls_first.split('/')[-2]
                label_tr2 = ls_second[0].split('/')[-2]
                print('#####################################################')
                print("Scale is not correct! " + label_tr1 + '>' + label_tr2)
                print('#####################################################')
                scale_str = 'N'
            elif abs(tr2.data).max() >= 2.0 * abs(tr1.data).max():
                label_tr1 = ls_first.split('/')[-2]
                label_tr2 = ls_second[0].split('/')[-2]
                print('#####################################################')
                print("Scale is not correct! " + label_tr2 + '>' + label_tr1)
                print('#####################################################')
                scale_str = 'N'
            
            if not str(coeff) == 'nan':
                cc_open.writelines(id_name + ',' + str(round(coeff, 4)) + ',' + str(t_shift) + \
                                                ',' + scale_str + ',' + '\n')
                                
            print "Cross Correlation:"
            print id_name
            print "Shift:       " + str(t_shift)
            print "Coefficient: " + str(coeff)
            print print_sta
            print '------------------'
       
            cc_open.close()
            cc_open.close()
    
    except Exception as error:
        print('##################')
        print(error)
        print('##################')
Example 36
tiemp = []
tiems = []
for i in range(0, len(arrivals_p)):
    tiemp.append(arrivals_p[i].time)
for ii in range(0, len(arrivals_s)):
    tiems.append(arrivals_s[ii].time)

# first arrivals
arriv_p = min(tiemp)
arriv_s = min(tiems)

sampling_rate = int(RLAS[0].stats.sampling_rate)
sec = 60  # window length for correlation

# calculate correlation coefficients
corrcoefs = []
for ic in range(0, len(RLAS[0]) // (int(sampling_rate * sec))):
    coeffs = xcorr(RLAS[0].data[sampling_rate * sec * ic: sampling_rate * sec * (ic + 1)],
                   AC[0].data[sampling_rate * sec * ic: sampling_rate * sec * (ic + 1)], 0)
    corrcoefs.append(coeffs[1])

# estimate the Backazimuth for each time window 
step = 10
backas = np.linspace(0, 360 - step, 360 // step)
corrbaz = []
ind = None
for i6 in range(0, len(backas)):
    for i7 in range(0, len(corrcoefs)):
        corrbazz = xcorr(RLAS[0][sampling_rate * sec * i7: sampling_rate * sec * (i7 + 1)],
                         rotate_ne_rt(AC_original.select(component='N')[0].data,
                                      AC_original.select(component='E')[0].data,
                                      backas[i6])[1][sampling_rate * sec * i7: sampling_rate * sec * (i7 + 1)],
                         0)
        corrbaz.append(corrbazz[1])
corrbaz = np.asarray(corrbaz)
Example 37
def cross_net(stream, env=False, debug=0, master=False):
    r"""Function to generate picks for each channel based on optimal moveout \
    defined by maximum cross-correlation with master trace.  Master trace \
    will be the first trace in the stream.

    :type stream: :class: obspy.Stream
    :param stream: Stream to pick
    :type env: bool
    :param env: To compute cross-correlations on the envelope or not.
    :type debug: int
    :param debug: Debug level from 0-5
    :type master: obspy.Trace
    :param master: Trace to use as master, if False, will use the first trace \
            in stream.

    :returns: list of pick class
    """
    from obspy.signal.cross_correlation import xcorr
    from obspy.signal.filter import envelope
    from eqcorrscan.utils.Sfile_util import PICK
    import matplotlib.pyplot as plt
    import numpy as np
    picks = []
    samp_rate = stream[0].stats.sampling_rate
    if not env:
        if debug > 2:
            print('Using the raw data')
        st = stream.copy()
        st.resample(samp_rate)
    else:
        st = stream.copy()
        if debug > 2:
            print('Computing envelope')
        for tr in st:
            tr.resample(samp_rate)
            tr.data = envelope(tr.data)
    if debug > 2:
        st.plot(equal_scale=False, size=(800, 600))
    if not master:
        master = st[0]
    master.data = np.nan_to_num(master.data)
    for tr in st:
        tr.data = np.nan_to_num(tr.data)
        if debug > 2:
            msg = ' '.join(['Comparing', tr.stats.station, tr.stats.channel,
                            'with the master'])
            print(msg)
        shift_len = int(0.3 * len(tr))
        if debug > 2:
            print('Shift length is set to ' + str(shift_len) + ' samples')
        if debug > 3:
            index, cc, cc_vec = xcorr(master, tr, shift_len, full_xcorr=True)
            cc_vec = np.nan_to_num(cc_vec)
            if debug > 4:
                print(cc_vec)
            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            x = np.linspace(0, len(master) / samp_rate,
                            len(master))
            ax1.plot(x, master.data / float(master.data.max()), 'k',
                     label='Master')
            ax1.plot(x + (index / samp_rate), tr.data / float(tr.data.max()),
                     'r', label='Slave shifted')
            ax1.legend(loc="lower right", prop={'size': "small"})
            ax1.set_xlabel("time [s]")
            ax1.set_ylabel("norm. amplitude")
            ax2 = fig.add_subplot(212)
            print(len(cc_vec))
            x = np.linspace(0, len(cc_vec) / samp_rate, len(cc_vec))
            ax2.plot(x, cc_vec, label='xcorr')
            # ax2.set_ylim(-1, 1)
            # ax2.set_xlim(0, len(master))
            plt.show()
        index, cc = xcorr(master, tr, shift_len)
        pick = PICK(station=tr.stats.station,
                    channel=tr.stats.channel,
                    impulsivity='E',
                    phase='S',
                    weight='1',
                    time=tr.stats.starttime + (index / tr.stats.sampling_rate))
        if debug > 2:
            print(pick)
        picks.append(pick)
    del st
    return picks
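
# --- not part of the original example: a hedged usage sketch, assuming an
# --- eqcorrscan install that still provides utils.Sfile_util.PICK and an
# --- obspy version with the legacy xcorr API
if __name__ == '__main__':
    from obspy import read
    st = read()                      # obspy's bundled three-channel demo stream
    picks = cross_net(st, env=True)  # one PICK per channel
    for pick in picks:
        print(pick.station, pick.time)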
Esempio n. 38
0
# station location (Wettzell)
station_latitude = 49.144001
station_longitude = 12.8782

# theoretical backazimuth and distance
baz = gps2dist_azimuth(source_latitude, source_longitude, station_latitude, station_longitude)


sampling_rate = int(RLAS[0].stats.sampling_rate)
sec = 120  # window length for correlation (teleseismic event)

# calculate correlation coefficients
corrcoefs = []
for ic in xrange(0, len(RLAS[0]) // (sampling_rate * sec)):
    coeffs = xcorr(RLAS[0].data[sampling_rate * sec * ic : sampling_rate * sec * (ic + 1)],
                   AC[0].data[sampling_rate * sec * ic : sampling_rate * sec * (ic + 1)], 0)
    corrcoefs.append(coeffs[1])

TauPy_model = TauPyModel('ak135')
arrivals_p = TauPy_model.get_travel_times(distance_in_degree=0.001 * baz[0] / 111.11,
                                          source_depth_in_km=event.origins[0].depth * 0.001,
                                          phase_list=["P", "p", "Pdiff", "PP", "PKiKP", "PKIKP", "Pn", "Pg"])
arrivals_s = TauPy_model.get_travel_times(distance_in_degree=0.001 * baz[0] / 111.11,
                                          source_depth_in_km=event.origins[0].depth * 0.001,
                                          phase_list=["S", "s", "Sdiff", "SS", "SKiKS", "SKIKS", "Sn", "Sg"])
tiemp = []
Esempio n. 39
0
def PEXCorr1(st1, st2, maxlag):
    import numpy
    from obspy.signal.cross_correlation import xcorr  # legacy obspy API
    # normalize to unit energy so the returned correlation is scale-invariant
    st1 = st1/numpy.linalg.norm(st1)
    st2 = st2/numpy.linalg.norm(st2)
    return xcorr(st1, st2, maxlag, full_xcorr=True)[2]  # full correlation vector
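
# --- not part of the original snippet: a hedged usage sketch on synthetic
# --- data (the sign of the lag follows xcorr's own convention)
import numpy as np
sig = np.random.randn(1000)
cc = PEXCorr1(sig, np.roll(sig, 5), 50)  # correlation vector of length 2*50+1
print(np.argmax(cc) - 50)                # lag of the correlation peak, in samples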
Esempio n. 40
0
print(st_DEC[0].stats.starttime)
print(st_FEB[0].stats.starttime)

epsis_DEC=[]
epsis_FEB=[]

for ch in range(6,25):
  #### 1-bit normalization:
  tr1_DEC = np.sign(st_DEC[ch].data)
  tr2_DEC = np.sign(st_DEC[ch+2].data)
  tr1_FEB = np.sign(st_FEB[ch].data)
  tr2_FEB = np.sign(st_FEB[ch+2].data)


  index1, value1, corr_DEC = xcorr(tr1_DEC, tr2_DEC, 1500, full_xcorr=True)
  index2, value2, corr_FEB = xcorr(tr1_FEB, tr2_FEB, 1500, full_xcorr=True)

  #### NORMALIZE:
  corr_DEC = corr_DEC/np.max(np.abs(corr_DEC))
  corr_FEB = corr_FEB/np.max(np.abs(corr_FEB))
  ref = (corr_DEC + corr_FEB)/np.max(np.abs(corr_DEC+corr_FEB))


  #plt.plot(corr_DEC, color='r')
  #plt.plot(corr_FEB, color='b')
  #plt.plot(ref, color='k')
  #plt.show()

  #### STRETCHING:
  epsilons = np.linspace(-0.1, 0.1, 501)
Esempio n. 41
0
 stack=chan_traces[-1]
 chan_traces=[st[0] for st in chan_traces]
 if plotvar:
     fig, axes = plt.subplots(len(chan_traces)+1, 1, sharex=True,\
                              figsize=(7, 12))
     axes=axes.ravel()
     axes[0].plot(stack[0].data, 'r', linewidth=1.5)
     axes[0].set_title(chan_traces[0].stats.station+'.'+\
                       chan_traces[0].stats.channel)
     axes[0].set_ylabel('Stack')
 for i, tr in enumerate(chan_traces):
     if plotvar:
         axes[i+1].plot(tr.data, 'k', linewidth=1.5)
     # corr = normxcorr2(tr.data.astype(np.float32),\
                       # stack[0].data.astype(np.float32))
     dummy, corr = xcorr(tr.data.astype(np.float32),\
                          stack[0].data.astype(np.float32), 1)
     corr=np.array(corr).reshape(1,1)
     if plotvar:
         axes[i+1].set_ylabel(str(round(corr[0][0],2)))
     if corr[0][0] < corr_thresh:
         # Remove the channel
         print(str(corr) + ' for channel ' + tr.stats.station + '.' +
               tr.stats.channel + ' event ' + str(i))
         all_detection_streams[event_list[i]].remove(tr)
     else:
         final_event_list.append(event_list[i])
 if plotvar:
    plt.show()
 # We should require at-least three detections per channel used
 # Compute the SVD
 if len(final_event_list) >= 3:
Esempio n. 42
0
plt.plot(cor_n[510:1029])

plt.subplot(414)
plt.title("FFT of noise")
plt.plot(syn_fft)
plt.show()

# <codecell>

plt.scatter(snr,cor_coef[0])
plt.ylim([0, 1])
plt.xlabel("SNR")
plt.ylabel("Correlation coefficient")
plt.show()

# <codecell>

len(syn_wav3.T)

# <codecell>

shift, coep, cor_o = xcorr(syn_wav2, syn_wav2, 100, full_xcorr=True)

# <codecell>

coep

Esempio n. 43
0
def stretching(signalRef, signalStr, epsilons, timevec, starttime=None, endtime=None):
	"""
	Calculates the stretching factor eps. This is the factor with which a signal (signalStr)
	must be stretched to get the highest correlation with a reference signal (signalRef).
	The factor eps is chosen from an array epsilons. The time vector of both signals is timevec.
	If starttime and endtime for a time window are provided, eps is calculated for the positive
	and negative time window as well for both time windows together. Starttime and endtime refer
	to the positive time window; from that a negative time window is calculated
	(e.g. starttime = 20.0, endtime = 50.0 --> -20.0 and -50.0 for the negative window).
	If no starttime and endtime are given, eps is computed for the whole data.
	"""

   
	if starttime is not None and endtime is not None: # eps for time windows

		if endtime > timevec[-1]:
			raise ValueError('Time window exceeds bound of time vector!')
		if starttime < 0.0:
			raise ValueError('Positive and negative time window are overlapping!')
		if starttime > endtime:
			raise ValueError('Starttime must be smaller than endtime!')	

		# indices of starttime and endtime of the time windows
		pos_t1 = np.abs(timevec-starttime).argmin()
		pos_t2 = np.abs(timevec-endtime).argmin()
		neg_t1 = np.abs(timevec-(-endtime)).argmin()
		neg_t2 = np.abs(timevec-(-starttime)).argmin()
		
		# taper the time windows
		pos_time = timevec[pos_t1:(pos_t2+1)]
		pos_taper_percentage = 0.1
		pos_taper = np.blackman(int(len(pos_time) * pos_taper_percentage))
		pos_taper_left, pos_taper_right = np.array_split(pos_taper, 2)
		pos_taper = np.concatenate([pos_taper_left, np.ones(len(pos_time)-len(pos_taper)), pos_taper_right])

		neg_time = timevec[neg_t1:(neg_t2+1)]
		neg_taper_percentage = 0.1
		neg_taper = np.blackman(int(len(neg_time) * neg_taper_percentage))
		neg_taper_left, neg_taper_right = np.array_split(neg_taper, 2)
		neg_taper = np.concatenate([neg_taper_left, np.ones(len(neg_time)-len(neg_taper)), neg_taper_right])

		both_time = np.concatenate([neg_time, pos_time])
		both_taper_percentage = 0.1
		both_taper = np.blackman(int(len(both_time) * both_taper_percentage))
		both_taper_left, both_taper_right = np.array_split(both_taper, 2)
		both_taper = np.concatenate([both_taper_left, np.ones(len(both_time)-len(both_taper)), both_taper_right])

		pos_signalRef = pos_taper * signalRef[pos_t1:(pos_t2+1)]
		pos_signalStr = pos_taper * signalStr[pos_t1:(pos_t2+1)]
		neg_signalRef = neg_taper * signalRef[neg_t1:(neg_t2+1)]
		neg_signalStr = neg_taper * signalStr[neg_t1:(neg_t2+1)]
		both_signalRef = both_taper * np.concatenate([signalRef[neg_t1:(neg_t2+1)], signalRef[pos_t1:(pos_t2+1)]])
		both_signalStr = both_taper * np.concatenate([signalStr[neg_t1:(neg_t2+1)], signalStr[pos_t1:(pos_t2+1)]])
		
		# calculate the correlation coefficient CC for each epsilon
		posCC = []
		negCC = []
		bothCC = []
		for i in xrange(0,len(epsilons),1):
			# positive time window
			pos_time_new = (1.0-epsilons[i])*pos_time
			pos_s = InterpolatedUnivariateSpline(pos_time_new, pos_signalStr)
			pos_stretch = pos_s(pos_time)
			pos_coeffs = xcorr(pos_stretch,pos_signalRef,0)
			posCC.append(pos_coeffs[1])
			
			# negative time window
			neg_time_new = (1.0-epsilons[i])*neg_time
			neg_s = InterpolatedUnivariateSpline(neg_time_new, neg_signalStr)
			neg_stretch = neg_s(neg_time)
			neg_coeffs = xcorr(neg_stretch,neg_signalRef,0)
			negCC.append(neg_coeffs[1])
			
			# both time windows
			both_time_new = (1.0-epsilons[i])*both_time
			both_s = InterpolatedUnivariateSpline(both_time_new, both_signalStr)
			both_stretch = both_s(both_time)
			both_coeffs = xcorr(both_stretch,both_signalRef,0)
			bothCC.append(both_coeffs[1])
			
			
		# determine the max. CC and corresponding epsilon
		posmaxCC = max(posCC)
		posindex = posCC.index(posmaxCC)
		poseps = epsilons[posindex]

		negmaxCC = max(negCC)
		negindex = negCC.index(negmaxCC)
		negeps = epsilons[negindex]	

		bothmaxCC = max(bothCC)
		bothindex = bothCC.index(bothmaxCC)
		botheps = epsilons[bothindex]

		return poseps, posmaxCC, negeps, negmaxCC, botheps, bothmaxCC
	
	elif (starttime is None) != (endtime is None):
		raise SyntaxError('Both starttime and endtime must be given!')


	else: # eps for whole data

		# taper the signal and the reference
		taper_percentage = 0.1
		taper = np.blackman(int(len(timevec) * taper_percentage))
		taper_left, taper_right = np.array_split(taper, 2)
		taper = np.concatenate([taper_left, np.ones(len(timevec)-len(taper)), taper_right])

		signalStr = signalStr * taper
		signalRef = signalRef * taper
		
		# calculate the correlation coefficient CC for each epsilon
		CC = []
		for i in xrange(0,len(epsilons),1):
			time_new = (1.0-epsilons[i])*timevec
			s = InterpolatedUnivariateSpline(time_new, signalStr)
			stretch = s(timevec)
			coeffs = xcorr(stretch,signalRef,0)
			CC.append(coeffs[1])
		
		# determine the max. CC and corresponding epsilon
		maxCC = max(CC)
		index = CC.index(maxCC)
		eps = epsilons[index]
	
		return eps, maxCC
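
# --- not part of the original function: a hedged self-test sketch. A
# --- reference cosine is compared against a copy stretched by a known
# --- factor, which stretching() should roughly recover. Assumes numpy,
# --- scipy's InterpolatedUnivariateSpline and the legacy obspy xcorr are
# --- importable at module level, as the function above already expects.
import numpy as np
timevec = np.linspace(0.0, 10.0, 2001)
eps_true = 0.02
signalRef = np.cos(2.0 * np.pi * 2.0 * timevec)
signalStr = np.cos(2.0 * np.pi * 2.0 * (1.0 - eps_true) * timevec)
epsilons = np.linspace(-0.05, 0.05, 201)
eps, maxCC = stretching(signalRef, signalStr, epsilons, timevec)
print(eps, maxCC)  # eps should land close to eps_true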
Esempio n. 44
0
def detections_2_cat(detections, template_dict, stream, temp_prepick, max_lag, cc_thresh,
                     extract_pre_pick=3.0, extract_post_pick=7.0, write_wav=False, debug=0):
    r"""Function to create a catalog from a list of detections, adjusting template pick \
    times using cross correlation with data stream at the time of detection.

    :type detections: list of DETECTION objects
    :param detections: Detections which we want to extract and locate.
    :type template_dict: dict
    :param template_dict: Dictionary of template name: template stream for the entire \
        catalog. Template names must be in the format found in the DETECTION objects.
    :type stream: obspy.Stream
    :param stream: stream encompassing time span of the detections. Will be used for pick \
        refinement by cross correlation. Should be fed a stream processed in the same way \
        as the streams in template dict (and in the same way that they were processed \
        during matched filtering). The waveforms will not be processed here.
    :type write_wav: bool or str
    :param write_wav: If false, will not write detection waveforms to miniseed files. \
        Otherwise, specify a directory to write the templates to. Will use name \
        template_name_detection_time.mseed.
    :returns: :class: obspy.Catalog
    """

    import warnings
    import numpy as np
    import matplotlib.pyplot as plt
    from obspy import UTCDateTime, Catalog, Stream
    from obspy.core.event import ResourceIdentifier, Event, Pick, CreationInfo, Comment, WaveformStreamID
    from obspy.signal.cross_correlation import xcorr
    from eqcorrscan.utils import plotting

    #XXX TODO Scripts havent been saving the actual detection objects so we cannot make
    #XXX TODO use of DETECTION.chans. Would be useful.

    # Copy stream out of the way
    st = stream.copy()
    # Create nested dictionary of delays template_name: stachan: delay
    # dict.items() works in both python 2 and 3 but is memory inefficient in 2 as both vars are
    # read into memory as lists
    delays = {}
    for name, temp in template_dict.items():
        sorted_temp = temp.sort(['starttime'])
        stachans = [(tr.stats.station, tr.stats.channel, tr.stats.network)
                    for tr in sorted_temp]
        mintime = sorted_temp[0].stats.starttime
        delays[name] = {(tr.stats.station, tr.stats.channel): tr.stats.starttime - mintime
                        for tr in sorted_temp}
    # Loop over all detections, saving each as a new event in a catalog
    new_cat = Catalog()
    for detection in detections:
        if write_wav:
            new_stream = Stream()
        if hasattr(detection, 'event'):
            new_event = detection.event
        else:
            rid = ResourceIdentifier(id=detection.template_name + '_' +\
                                        detection.detect_time.strftime('%Y%m%dT%H%M%S.%f'),
                                     prefix='smi:local')
            new_event = Event(resource_id=rid)
            cr_i = CreationInfo(author='EQcorrscan',
                                creation_time=UTCDateTime())
            new_event.creation_info = cr_i
            thresh_str = 'threshold=' + str(detection.threshold)
            ccc_str = 'detect_val=' + str(detection.detect_val)
            det_time_str = 'det_time=%s' % str(detection.detect_time)
            if detection.chans:
                used_chans = 'channels used: ' + \
                             ' '.join([str(pair) for pair in detection.chans])
                new_event.comments.append(Comment(text=used_chans))
            new_event.comments.append(Comment(text=thresh_str))
            new_event.comments.append(Comment(text=ccc_str))
            new_event.comments.append(Comment(text=det_time_str))
        template = template_dict[detection.template_name]
        temp_len = template[0].stats.npts / template[0].stats.sampling_rate  # template length in seconds
        if template.sort(['starttime'])[0].stats.starttime == detection.detect_time:
            print('Template %s detected itself at %s.' % (detection.template_name, str(detection.detect_time)))
            new_event.resource_id = ResourceIdentifier(id=detection.template_name + '_self',
                                                       prefix='smi:local')
        if debug >= 2:
            print('Plotting detection for template: %s' % detection.template_name)
            plt_st = Stream([st.select(station=tr.stats.station,
                                       channel=tr.stats.channel)[0].slice(detection.detect_time-extract_pre_pick,
                                                                          detection.detect_time+extract_post_pick)
                             for tr in template if len(st.select(station=tr.stats.station,
                                                                 channel=tr.stats.channel)) > 0])
            plotting.detection_multiplot(plt_st, template, [detection.detect_time.datetime])
        # Loop over each trace in the template, correcting picks for new event if need be
        for tr in template:
            sta = tr.stats.station
            chan = tr.stats.channel
            if len(st.select(station=sta, channel=chan)) != 0:
                st_tr = st.select(station=sta, channel=chan)[0]
            else:
                print('No stream for %s: %s' % (sta, chan))
                continue
            st_tr_pick = detection.detect_time + delays[detection.template_name][(sta, chan)] + temp_prepick
            i, absval, full_corr = xcorr(tr, st_tr.slice(st_tr_pick - temp_prepick,
                                                            st_tr_pick - temp_prepick + temp_len),
                                            shift_len=max_lag, full_xcorr=True)
            ccval = max(full_corr)
            index = np.argmax(full_corr) - max_lag
            pk_str = 'ccval=' + str(ccval)
            if abs(index) == max_lag:
                msg = 'Correlation correction at max_lag. Consider increasing max_lag.'
                warnings.warn(msg)
            if debug >= 3:
                print('Plotting full correlation function')
                print('index: %d' % index)
                print('max_ccval: %.2f' % ccval)
                plt.plot(full_corr)
                plt.show()
                plt.close()
            if ccval > cc_thresh:
                print('Threshold exceeded at %s: %s' % (sta, chan))
                pick_tm = st_tr_pick + (index / tr.stats.sampling_rate)
            else:
                print('Correlation at %s: %s not good enough to correct pick' % (sta, chan))
                pick_tm = st_tr_pick
            if tr.stats.channel[-1] in ['Z']:
                phase_hint = 'P'
            elif tr.stats.channel[-1] in ['N', 'E', '1', '2']:
                phase_hint = 'S'
            wv_id = WaveformStreamID(network_code=tr.stats.network,
                                     station_code=tr.stats.station,
                                     channel_code=tr.stats.channel)
            new_event.picks.append(Pick(time=pick_tm, waveform_id=wv_id, phase_hint=phase_hint,
                                        comments=[Comment(text=pk_str)]))
            if write_wav:
                    new_stream.append(st_tr.slice(starttime=pick_tm - extract_pre_pick,
                                                  endtime=pick_tm + extract_post_pick))
        # Append to new catalog
        new_cat += new_event
        if write_wav:
            filename = '%s%s.mseed' % (write_wav, str(new_event.resource_id))
            print('Writing new stream for detection to %s' % filename)
            new_stream.write(filename, format='MSEED')
    return new_cat
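
# --- not part of the original function: a condensed, hedged sketch of the
# --- pick-refinement core above -- correlate one template trace against the
# --- data around its predicted pick and shift the pick by the lag of the
# --- correlation peak (names here are illustrative only)
import numpy as np
from obspy.signal.cross_correlation import xcorr  # legacy obspy API

def refine_pick(template_tr, data_tr, predicted_pick, max_lag, cc_thresh):
    temp_len = template_tr.stats.npts / template_tr.stats.sampling_rate  # seconds
    chunk = data_tr.slice(predicted_pick, predicted_pick + temp_len)
    _, _, full_corr = xcorr(template_tr, chunk, shift_len=max_lag, full_xcorr=True)
    lag = np.argmax(full_corr) - max_lag  # samples; positive shifts the pick later
    if max(full_corr) > cc_thresh:
        return predicted_pick + lag / data_tr.stats.sampling_rate
    return predicted_pick  # correlation too weak; keep the predicted time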
Esempio n. 45
0
def HVscheme2(st, f0, bazi, astime, aetime, disdeg, eve):
    
    st2 = finalfilter(st,f0,bazi,astime,aetime,True)
    corrs =[]
    lags = []
    win = int(round(1./f0,0))
    for window in st2.slide(window_length=win, step=int(round(win/16.,0))):
        HilbertV = np.imag(hilbert(window.select(component="Z")[0].data))
        lag, corr = xcorr(HilbertV,window.select(component="R")[0].data, 5, full_xcorr=False)

        #corr = pearsonr(HilbertV, window.select(component="R")[0].data)
        corrs.append(corr)
        lags.append(lag)
    corr = corrs
    HilbertV = np.imag(hilbert(st2.select(component="Z")[0].data))
    oldx = np.asarray(range(len(corr)))/(float(len(corr))/float(len(HilbertV)))
    corr = np.interp(range(len(HilbertV)),oldx, corr)
    lags = np.interp(range(len(HilbertV)),oldx, lags)
    env = envelope(st2.select(component="R")[0].data)*envelope(HilbertV)
    HV = envelope(st2.select(component="R")[0].data)/envelope(HilbertV)
    env *= 1./np.max(np.abs(env))
    #corr *= env
    #phase = lag*f0*360. + 90.
    
    t = np.asarray(range(len(HilbertV)))
    
    lim = t[(corr >= .90)]
    if len(lim) == 0:
        return 0, 0, 0, 0


    phase = np.mean(lags[(corr>=.90)])*f0*360. + 90.
    HV2 = HV[(corr >= .90)]
    lim = lim[(HV2 <= np.mean(HV2) + 3.*np.std(HV2)) & (HV2 >= np.mean(HV2) - 3.*np.std(HV2)) ]
    HV2 = HV2[(HV2 <= np.mean(HV2) + 3.*np.std(HV2)) & (HV2 >= np.mean(HV2) - 3.*np.std(HV2)) ]
    
    mHV = np.mean(np.log10(HV2))
    #print(str(mHV))
    med = np.median(np.log10(HV2))
    stdHV = np.std(np.log10(HV2))
    print("Here we are:" + str(mHV))
    # if mHV == 0.0:
    #     fig = plt.figure(1, figsize=(16,16))
    #     plt.subplots_adjust(hspace=0.001)
    #     plt.subplot(211)
    #     plt.title(st[0].stats.network + ' ' + st[0].stats.station + ' ' + st[0].stats.location + ' Period: ' + str(int(round(1./f0,0))) + ' s Distance: ' + str(round(disdeg,0)) + ' degrees')
    #     plt.plot(t, HilbertV*10**9, label=' Shifted Vertical')
    #     plt.xlim((min(t),max(t)))
    #     plt.plot(t,st2.select(component="R")[0].data*10**9, label='Radial')
    #     plt.ylabel('Velocity (nm/s)')
    #     plt.xticks([])
    #     plt.legend(loc=1)
    #     plt.subplot(212)
    #     plt.plot(t, HV, color='k', label='HV=' + str(round(mHV,2)) + '$\pm$' + str(round(stdHV,2)))
    #     plt.plot(t, corr, color='.5', label='Characteristic Function')
    #     plt.ylim((0., 2.))
    #     plt.yticks([0.,  1., 2.])
    #     plt.ylabel('HV Ratio')
    #     plt.axvspan(min(lim), max(lim), 0.,2.,alpha=.3, color='.5')
    #     plt.xlim((min(t),max(t)))
    #     plt.xlabel('Time (s)')
    #     plt.legend(loc=1)
    #     plt.show()
    ##plt.clf()
    #plt.savefig('PLT_' + st[0].stats.network + '_' + st[0].stats.station + '_' + st[0].stats.location + '_' + str(eve.origins[0].time.year) + '_' + str(eve.origins[0].time.julday).zfill(3) + \
            #'_' + str(eve.origins[0].time.hour).zfill(2) + '_' + str(eve.origins[0].time.minute).zfill(2) + '_' + str(int(round(1./f0,0))) + '.png', format='PNG', dpi=400)
    #plt.clf()
    
    print(str(med))
    return mHV, stdHV, phase, med
Esempio n. 46
0
def cross_net(stream, env=False, debug=0, master=False):
    """
    Generate picks using a simple envelope cross-correlation.
    Picks are made for each channel based on optimal moveout \
    defined by maximum cross-correlation with master trace.  Master trace \
    will be the first trace in the stream.

    :type stream: :class: obspy.Stream
    :param stream: Stream to pick
    :type env: bool
    :param env: To compute cross-correlations on the envelope or not.
    :type debug: int
    :param debug: Debug level from 0-5
    :type master: obspy.Trace
    :param master: Trace to use as master, if False, will use the first trace \
            in stream.

    :returns: obspy.core.event.Event

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.picker import cross_net
    >>> st = read()
    >>> event = cross_net(st, env=True)
    >>> event.creation_info.author
    'EQcorrscan'
    """
    from obspy.signal.cross_correlation import xcorr
    from obspy.signal.filter import envelope
    from obspy import UTCDateTime
    from obspy.core.event import Event, Pick, WaveformStreamID
    from obspy.core.event import CreationInfo, Comment, Origin
    import matplotlib.pyplot as plt
    import numpy as np

    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='cross_net'))
    samp_rate = stream[0].stats.sampling_rate
    if not env:
        if debug > 2:
            print('Using the raw data')
        st = stream.copy()
        st.resample(samp_rate)
    else:
        st = stream.copy()
        if debug > 2:
            print('Computing envelope')
        for tr in st:
            tr.resample(samp_rate)
            tr.data = envelope(tr.data)
    if debug > 2:
        st.plot(equal_scale=False, size=(800, 600))
    if not master:
        master = st[0]
    master.data = np.nan_to_num(master.data)
    for i, tr in enumerate(st):
        tr.data = np.nan_to_num(tr.data)
        if debug > 2:
            msg = ' '.join(['Comparing', tr.stats.station, tr.stats.channel,
                            'with the master'])
            print(msg)
        shift_len = int(0.3 * len(tr))
        if debug > 2:
            print('Shift length is set to ' + str(shift_len) + ' samples')
        if debug > 3:
            index, cc, cc_vec = xcorr(master, tr, shift_len, full_xcorr=True)
            cc_vec = np.nan_to_num(cc_vec)
            if debug > 4:
                print(cc_vec)
            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            x = np.linspace(0, len(master) / samp_rate,
                            len(master))
            ax1.plot(x, master.data / float(master.data.max()), 'k',
                     label='Master')
            ax1.plot(x + (index / samp_rate), tr.data / float(tr.data.max()),
                     'r', label='Slave shifted')
            ax1.legend(loc="lower right", prop={'size': "small"})
            ax1.set_xlabel("time [s]")
            ax1.set_ylabel("norm. amplitude")
            ax2 = fig.add_subplot(212)
            print(len(cc_vec))
            x = np.linspace(0, len(cc_vec) / samp_rate, len(cc_vec))
            ax2.plot(x, cc_vec, label='xcorr')
            # ax2.set_ylim(-1, 1)
            # ax2.set_xlim(0, len(master))
            plt.show()
        index, cc = xcorr(master, tr, shift_len)
        wav_id = WaveformStreamID(station_code=tr.stats.station,
                                  channel_code=tr.stats.channel,
                                  network_code=tr.stats.network)
        event.picks.append(Pick(time=tr.stats.starttime + (index / tr.stats.sampling_rate),
                                waveform_id=wav_id,
                                phase_hint='S',
                                onset='emergent'))
        if debug > 2:
            print(event.picks[i])
    event.origins[0].time = min([pick.time for pick in event.picks]) - 1
    event.origins[0].latitude = float('nan')
    event.origins[0].longitude = float('nan')
    # Set arbitrary origin time
    del st
    return event
Esempio n. 47
0
 def corrEW(self):
     # numpy arrays have no .length(); use half the trace length as max shift
     windowLen = len(self.stref[1].data) // 2
     a, corValue = xcorr(self.stref[1].data, self.sttest[1].data, windowLen)
     return corValue
Esempio n. 48
0
def crossc(dstart,dend,ch1,ch2,day):
# here you load all the functions you need to use

  from obspy.seg2.seg2 import readSEG2
  from obspy.core import Stream
  import numpy as np
  from obspy.signal.cross_correlation import xcorr
  from numpy import sign
  from obspy.signal.filter import lowpass
  from obspy.signal.filter import highpass
  from obspy.signal.filter import bandstop
  from obspy.signal.filter import bandpass
  dataDir = "/import/three-data/hadzii/STEINACH/STEINACH_longtime/"
  outdir = "/home/jsalvermoser/Desktop/Processing/bands_SNR/" + "CH" + str(ch1) + "_CH" + str(ch2) + "/" + "JAN" + str(day) + "/"


	# loading the info for outfile-name
  stream_start = readSEG2(dataDir + str(dstart) + ".dat")
  t_start = stream_start[ch1].stats.seg2.ACQUISITION_TIME
  stream_end = readSEG2(dataDir + str(dend) + ".dat")
  t_end = stream_end[ch1].stats.seg2.ACQUISITION_TIME

	# initialization of the arrays and variables
  TR = []
  rms = []
  sq = []
  ncalm = 1
  nbeat  = 1
  corr128_calm = 0
  corr128_beat = 0
  nerror = 0
  mu1c=0
  mu2c=0
  mu3c=0
  mu1b=0
  mu2b=0
  mu3b=0
  var1c=0
  var2c=0
  var3c=0
  var1b=0
  var2b=0
  var3b=0
  SNR_calm_b1=[]
  SNR_calm_b2=[]
  SNR_calm_b3=[]
  SNR_beat_b1=[]
  SNR_beat_b2=[]
  SNR_beat_b3=[]
  
  
  #TAPER
  # NOTE: time_vector is not defined in the original snippet; assumed here to
  # be the lag axis of the 25000-sample, 500 Hz correlations (+-50 s, 50001 points)
  time_vector = np.linspace(-50.0, 50.0, 50001)
  taper_percentage = 0.05
  taper = np.blackman(int(len(time_vector) * taper_percentage))
  taper_left, taper_right = np.array_split(taper, 2)
  taper = np.concatenate([taper_left, np.ones(len(time_vector)-len(taper)), taper_right])
  
  for j in range(0, dend-dstart):
    sq.append([])



  for k in range(dstart, dend, 4):
    start = k
    end = k + 5 # only used to merge 5-1 = 4 files to one stream
    try:
      st1 = merge_single(ch1, start, end)
      st2 = merge_single(ch2, start, end)
      st1.detrend('linear')
      st2.detrend('linear')

      # calculate squares for rms
      r = k - dstart
      sq[r] = 0
      for h in range(0, 64000):
        sq[r] += (st1[0].data[h])**2

      # filter both streams: lowpass, gentle highpass and a bandstop
      st1.filter('lowpass', freq=24, zerophase=True, corners=8)
      st1.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
      st1.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)
      st2.filter('lowpass', freq=24, zerophase=True, corners=8)
      st2.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
      st2.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)

      # sometimes channels seem to fail; the try/except prevents crashes

      # 1-bit normalization
      tr1 = sign(st1[0].data)
      tr2 = sign(st2[0].data)

      # cross-correlation
      index, value, acorr = xcorr(tr1, tr2, 25000, full_xcorr=True)

      print sq[r]

      # sanity check
      if np.max(acorr) > 1:
        acorr = np.zeros(50001)

      # sort the 128 s files into calm and beat; the threshold was chosen
      # after observing calm files
      if sq[r] < 1000000000000:
          corr128_calm += acorr
          ncalm += 1.
      else:
          corr128_beat += acorr
          nbeat += 1.
      print ncalm, nbeat  # just to check if calm or noisy
    except:
      nerror += 1
      print "%d : ERROR" % (r)

  if ncalm < 8:
    corr128_calm = np.zeros(50001)
  else:
    # normalization
    corr128_calm = (corr128_calm/ncalm) * taper

  corr128_beat = (corr128_beat/nbeat) * taper


  # filter again and divide into 3 bands which can be investigated separately
  
  corr128_calm_band1 = highpass(corr128_calm, freq=0.1, corners=4, zerophase=True, df=500.)
  corr128_calm_band1 = lowpass(corr128_calm_band1, freq=2, corners=4, zerophase=True, df=500.)
  corr128_calm_band2 = bandpass(corr128_calm, freqmin=2, freqmax=8, df=500., corners=4, zerophase=True)
  corr128_calm_band3 = bandpass(corr128_calm, freqmin=8, freqmax=24, df=500., corners=4, zerophase=True)
  corr128_beat_band1 = highpass(corr128_beat, freq=0.1, df=500., corners=4, zerophase=True)
  corr128_beat_band1 = lowpass(corr128_beat_band1, freq=2, corners=4, zerophase=True, df=500.)
  corr128_beat_band2 = bandpass(corr128_beat, freqmin=2, freqmax=8, df=500., corners=4, zerophase=True)
  corr128_beat_band3 = bandpass(corr128_beat, freqmin=8, freqmax=24, df=500., corners=4, zerophase=True)
  
  # SNR (signal-to-noise ratio):
  # divide the maximum of the signal by the standard deviation of a late
  # window (noise). As we don't know which window has the lowest signal
  # fraction, we loop over several windows. The bands need different window
  # lengths because they contain different frequencies: for each band the
  # minimum frequency fmin is chosen (e.g. 4 Hz), one cycle then lasts
  # 1/fmin (e.g. 0.25 s), and since we take windows of 3-4 cycles the
  # window length becomes 4 * 0.25 s = 1 s.
  
  ## CALM + BEAT
  for isnrb1 in range(45000,50000,2500):  # steps of half a windowlength
    endwb1=isnrb1 + 2500  # 5s window
    SNR_calm_b1.append(np.max(np.abs(corr128_calm_band1))/np.std(corr128_calm_band1[isnrb1:endwb1]))
    SNR_beat_b1.append(np.max(np.abs(corr128_beat_band1))/np.std(corr128_beat_band1[isnrb1:endwb1]))
  SNR_calm_b1 = max(SNR_calm_b1)
  SNR_beat_b1 = max(SNR_beat_b1)
  
  for isnrb2 in range(45000,49001,500):  # steps of half a windowlength
    endwb2=isnrb2 + 1000  # 2s windows
    SNR_calm_b2.append(np.max(np.abs(corr128_calm_band2))/np.std(corr128_calm_band2[isnrb2:endwb2]))
    SNR_beat_b2.append(np.max(np.abs(corr128_beat_band2))/np.std(corr128_beat_band2[isnrb2:endwb2]))
  SNR_beat_b2 = max(SNR_beat_b2)
  SNR_calm_b2 = max(SNR_calm_b2)
  
  for isnrb3 in range(45000,49751,125):  # steps of half a windowlength
    endwb3=isnrb3 + 250  # 0.5s windows
    SNR_calm_b3.append(np.max(np.abs(corr128_calm_band3))/np.std(corr128_calm_band3[isnrb3:endwb3]))
    SNR_beat_b3.append(np.max(np.abs(corr128_beat_band3))/np.std(corr128_beat_band3[isnrb3:endwb3]))
  SNR_beat_b3 = max(SNR_beat_b3)
  SNR_calm_b3 = max(SNR_calm_b3)
  
  if ncalm < 8:
    SNR_calm_b1 = 0
    SNR_calm_b2 = 0
    SNR_calm_b3 = 0

  print SNR_calm_b1, SNR_calm_b2, SNR_calm_b3
  print SNR_beat_b1, SNR_beat_b2, SNR_beat_b3
    	
  # RMS for histogram and sifting:
  #for s in range(0,dend-dstart):
  #  rms.append((sq[s]/16000)**(0.5))

  # save into files:
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_beat_0-2Hz" + "_" + "CH" + str(ch2), corr128_beat_band1)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_beat_2-8Hz" + "_" + "CH" + str(ch2), corr128_beat_band2)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_beat_8-24Hz" + "_" + "CH" + str(ch2), corr128_beat_band3)
  
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_calm_0-2Hz" + "_" + "CH" + str(ch2), corr128_calm_band1)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_calm_2-8Hz" + "_" + "CH" + str(ch2), corr128_calm_band2)
  np.save(outdir + t_start + "-" +  t_end + "CH" + str(ch1) + "_" +"xcorr128s_calm_8-24Hz" + "_" + "CH" + str(ch2), corr128_calm_band3) 

  # np.save(outdir + "JAN_"+"CH" + str(ch1) + "_" +"RMS" + "_" + "CH" + str(ch2) + str(dstart) + "-" + str(dend), rms)
  
  
  return corr128_beat_band1,corr128_beat_band2,corr128_beat_band3, corr128_calm_band1,corr128_calm_band2,corr128_calm_band3, ncalm, nbeat, SNR_beat_b1, SNR_beat_b2, SNR_beat_b3, SNR_calm_b1, SNR_calm_b2, SNR_calm_b3
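
# --- not part of the original function: a hedged sketch that factors the
# --- three repeated SNR scans above into one helper (illustrative names,
# --- same peak-over-late-window-std definition as in crossc)
def snr_scan(corr, win_len, start=45000, stop=50000):
    import numpy as np
    # peak amplitude over the noise std of late windows, scanning in steps
    # of half a window length, and keeping the best (quietest-window) value
    peak = np.max(np.abs(corr))
    snrs = [peak / np.std(corr[i:i + win_len])
            for i in range(start, stop - win_len + 1, win_len // 2)]
    return max(snrs)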
Esempio n. 49
0
def single_comparison():
    
    """
    One-by-one comparison of the waveforms in the first path with those in the second path.
    """
    
    client = Client()
    
    global input
    
    # identity of the waveforms (first and second paths) to be compared with each other
    identity_all = input['net'] + '.' + input['sta'] + '.' + \
                    input['loc'] + '.' + input['cha']
    ls_first = glob.glob(os.path.join(input['first_path'], identity_all))
    ls_second = glob.glob(os.path.join(input['second_path'], identity_all))
    
    for i in range(0, len(ls_first)):
        try:
            tr1 = read(ls_first[i])[0]
    
            if input['phase'] != 'N':
                evsta_dist = util.locations2degrees(lat1 = tr1.stats.sac.evla, \
                                        long1 = tr1.stats.sac.evlo, lat2 = tr1.stats.sac.stla, \
                                        long2 = tr1.stats.sac.stlo)
                
                taup_tt = taup.getTravelTimes(delta = evsta_dist, depth = tr1.stats.sac.evdp)
                
                phase_exist = 'N'
                
                for tt_item in taup_tt:
                    if tt_item['phase_name'] == input['phase']:
                        print 'Requested phase:'
                        print input['phase']
                        print '------'
                        print tt_item['phase_name']
                        print 'exists in the waveform!'
                        print '-----------------------'
                        t_phase = tt_item['time']
                        
                        phase_exist = 'Y'
                        break
                        
                if phase_exist != 'Y':
                    continue
            
            # identity of the current waveform
            identity = tr1.stats.network + '.' + tr1.stats.station + '.' + \
                        tr1.stats.location + '.' + tr1.stats.channel
            
            # tr1: first path, tr2: second path, tr3: Raw data
            #tr3 = read(os.path.join(input['first_path'], '..', 'BH_RAW', identity))[0]
            
            if input['resp_paz'] == 'Y':
                response_file = os.path.join(input['first_path'], '..', 'Resp/RESP.' + identity)
                
                # Extract the PAZ info from response file
                paz = readRESP(response_file, unit = input['corr_unit'])
                
                poles = paz['poles']
                zeros = paz['zeros']
                scale_fac = paz['gain']
                sensitivity = paz['sensitivity']
            
                print paz
                
                # Convert Poles and Zeros (PAZ) to frequency response.
                h, f = pazToFreqResp(poles, zeros, scale_fac, \
                                1./tr1.stats.sampling_rate, tr1.stats.npts*2, freq=True)
                # Use the evalresp library to extract 
                # instrument response information from a SEED RESP-file.
                resp = invsim.evalresp(t_samp = 1./tr1.stats.sampling_rate, \
                        nfft = tr1.stats.npts*2, filename = response_file, \
                        date = tr1.stats.starttime, units = input['corr_unit'].upper())
            
            # Keep the current identity in a new variable
            id_name = identity
            
            try:
                tr2 = read(os.path.join(input['second_path'], identity))[0]
            except Exception as error:
                # if it is not possible to read the identity in the second path
                # then change the network part of the identity based on
                # correction unit
                identity = input['corr_unit'] + '.' + tr1.stats.station + '.' + \
                        tr1.stats.location + '.' + tr1.stats.channel
                tr2 = read(os.path.join(input['second_path'], identity))[0]
            
            if input['resample'] != 'N':
                print 'WARNING: you are using resample!!!'
                tr1.resample(input['resample'])
                tr2.resample(input['resample'])
            
            if input['tw'] == 'Y':
                t_cut_1 = tr1.stats.starttime + t_phase - input['preset']
                t_cut_2 = tr1.stats.starttime + t_phase + input['offset']
                tr1.trim(starttime = t_cut_1, endtime = t_cut_2)
                
                t_cut_1 = tr2.stats.starttime + t_phase - input['preset']
                t_cut_2 = tr2.stats.starttime + t_phase + input['offset']
                tr2.trim(starttime = t_cut_1, endtime = t_cut_2)
            
            
            if input['hlfilter'] == 'Y':
                tr1.filter('lowpass', freq=input['hfreq'], corners=2)
                tr2.filter('lowpass', freq=input['hfreq'], corners=2)
                tr1.filter('highpass', freq=input['lfreq'], corners=2)
                tr2.filter('highpass', freq=input['lfreq'], corners=2)
            
            # normalization of all three waveforms to the 
            # max(max(tr1), max(tr2), max(tr3)) to keep the scales
            #maxi = max(abs(tr1.data).max(), abs(tr2.data).max(), abs(tr3.data).max())
            
            #maxi = max(abs(tr1.data).max(), abs(tr2.data).max())
            #tr1_data = tr1.data/abs(maxi)
            #tr2_data = tr2.data/abs(maxi)
            #tr3_data = tr3.data/abs(maxi)
            
            # normalize each trace by its peak absolute amplitude
            tr1_data = tr1.data/abs(tr1.data).max()
            tr2_data = tr2.data/abs(tr2.data).max()
            
            #tr1_data = tr1.data
            #tr2_data = tr2.data*1e9
            
            print max(tr1.data)
            print max(tr2.data)
            
            # create time arrays for tr1, tr2 and tr3
            time_tr1 = np.arange(0, tr1.stats.npts/tr1.stats.sampling_rate, \
                                                1./tr1.stats.sampling_rate)
            time_tr2 = np.arange(0, tr2.stats.npts/tr2.stats.sampling_rate, \
                                                1./tr2.stats.sampling_rate)
            #time_tr3 = np.arange(0, tr3.stats.npts/tr3.stats.sampling_rate, \
            #                                    1./tr3.stats.sampling_rate)
            
            # label for plotting
            label_tr1 = ls_first[i].split('/')[-2]
            label_tr2 = ls_second[i].split('/')[-2]
            label_tr3 = 'RAW'
        
            if input['resp_paz'] == 'Y':
                # start plotting
                plt.figure()
                plt.subplot2grid((3,4), (0,0), colspan=4, rowspan=2)
                #plt.subplot(211)
            
            plt.plot(time_tr1, tr1_data, color = 'blue', label = label_tr1, lw=3)
            plt.plot(time_tr2, tr2_data, color = 'red', label = label_tr2, lw=3)
            #plt.plot(time_tr3, tr3_data, color = 'black', ls = '--', label = label_tr3)

            plt.xlabel('Time (sec)', fontsize = 'xx-large', weight = 'bold')
            
            if input['corr_unit'] == 'dis':
                ylabel_str = 'Relative Displacement'
            elif input['corr_unit'] == 'vel':
                ylabel_str = 'Relative Vel'
            elif input['corr_unit'] == 'acc':
                ylabel_str = 'Relative Acc'
            
            plt.ylabel(ylabel_str, fontsize = 'xx-large', weight = 'bold')
            
            plt.xticks(fontsize = 'xx-large', weight = 'bold')
            plt.yticks(fontsize = 'xx-large', weight = 'bold')
            
            plt.legend(loc=1,prop={'size':20})
            
            #-------------------Cross Correlation
            # 3 seconds as total shift length for the cross correlation.
            
            cc_np = tr1.stats.sampling_rate * 3
            
            np_shift, coeff = cross_correlation.xcorr(tr1, tr2, int(cc_np))
            
            t_shift = float(np_shift)/tr1.stats.sampling_rate
            
            print "Cross Correlation:"
            print "Shift:       " + str(t_shift)
            print "Coefficient: " + str(coeff)
            
            plt.title('Single Comparison' + '\n' + str(t_shift) + \
                        ' sec , coeff: ' + str(round(coeff, 5)) + \
                        '\n' + id_name, \
                        fontsize = 'xx-large', weight = 'bold')
            
            if input['resp_paz'] == 'Y':
                # -----------------------
                #plt.subplot(223)
                plt.subplot2grid((3,4), (2,0), colspan=2)
                '''
                plt.plot(np.log10(f), np.log10(abs(resp)/(sensitivity*sensitivity)), \
                                            color = 'blue', label = 'RESP', lw=3)
                plt.plot(np.log10(f), np.log10(abs(h)/sensitivity), \
                                            color = 'red', label = 'PAZ', lw=3)
                '''
                plt.loglog(f, abs(resp)/(sensitivity*sensitivity), \
                                            color = 'blue', label = 'RESP', lw=3)
                plt.loglog(f, abs(h)/sensitivity, \
                                            color = 'red', label = 'PAZ', lw=3)
                
                #for j in [0.008, 0.012, 0.025, 0.5, 1, 2, 3, 4]:
                for j in [0]:
                    plt.axvline(j, linestyle = '--')  # axes are already log-scaled

                #plt.xlabel('Frequency [Hz]\n(power of 10)', fontsize = 'xx-large', weight = 'bold')
                #plt.ylabel('Amplitude\n      (power of 10)', fontsize = 'xx-large', weight = 'bold')
                
                plt.xlabel('Frequency [Hz]', fontsize = 'xx-large', weight = 'bold')
                plt.ylabel('Amplitude', fontsize = 'xx-large', weight = 'bold')
                
                plt.xticks(fontsize = 'xx-large', weight = 'bold')
                
                
                #plt.yticks = MaxNLocator(nbins=4)
                plt.yticks(fontsize = 'xx-large', weight = 'bold')
                plt.legend(loc=2,prop={'size':20})
                
                # -----------------------
                #plt.subplot(224)
                plt.subplot2grid((3,4), (2,2), colspan=2)

                #take negative of imaginary part
                phase_paz = np.unwrap(np.arctan2(h.imag, h.real))
                phase_resp = np.unwrap(np.arctan2(resp.imag, resp.real))
                #plt.plot(np.log10(f), phase_resp, color = 'blue', label = 'RESP', lw=3)
                #plt.plot(np.log10(f), phase_paz, color = 'red', label = 'PAZ', lw=3)
                
                plt.semilogx(f, phase_resp, color = 'blue', label = 'RESP', lw=3)
                plt.semilogx(f, phase_paz, color = 'red', label = 'PAZ', lw=3)
                
                #for j in [0.008, 0.012, 0.025, 0.5, 1, 2, 3, 4]:
                for j in [0.0]:
                    plt.axvline(j, linestyle = '--')  # axis is already log-scaled

                #plt.xlabel('Frequency [Hz]\n(power of 10)', fontsize = 'xx-large', weight = 'bold')
                plt.xlabel('Frequency [Hz]', fontsize = 'xx-large', weight = 'bold')
                plt.ylabel('Phase [radian]', fontsize = 'xx-large', weight = 'bold')
                
                plt.xticks(fontsize = 'xx-large', weight = 'bold')
                plt.yticks(fontsize = 'xx-large', weight = 'bold')
            
                plt.legend(loc=3,prop={'size':20})
                
                # title, centered above both subplots
                # make more room in between subplots for the ylabel of right plot
                plt.subplots_adjust(wspace=0.4, hspace=0.3)
                """
                # -----------------------
                plt.subplot(325)
                
                plt.plot(np.log10(f), np.log10(abs(resp)/(sensitivity*sensitivity)) - \
                                        np.log10(abs(h)/sensitivity), \
                                        color = 'black', label = 'RESP - PAZ')

                for j in [0.008, 0.012, 0.025, 0.5, 1, 2, 3, 4]:
                    plt.axvline(np.log10(j), linestyle = '--')

                plt.xlabel('Frequency [Hz] (power of 10)')
                plt.ylabel('Amplitude (power of 10)')

                plt.legend()
                
                # -----------------------
                plt.subplot(326)
                #take negative of imaginary part
                phase_paz = np.unwrap(np.arctan2(h.imag, h.real))
                phase_resp = np.unwrap(np.arctan2(resp.imag, resp.real))
                plt.plot(np.log10(f), np.log10(phase_resp) - np.log10(phase_paz), \
                                        color = 'black', label = 'RESP - PAZ')

                for j in [0.008, 0.012, 0.025, 0.5, 1, 2, 3, 4]:
                    plt.axvline(np.log10(j), linestyle = '--')

                plt.xlabel('Frequency [Hz] (power of 10)')
                plt.ylabel('Phase [radian] (power of 10)')

                plt.legend()

                # title, centered above both subplots
                # make more room in between subplots for the ylabel of right plot
                plt.subplots_adjust(wspace=0.3)
                """
            plt.show()
                
            
            print str(i+1) + '/' + str(len(ls_first))
            print ls_first[i]
            print '------------------'
            wait = raw_input(id_name)
            print '***************************'
            
        except Exception as error:
            print '##################'
            print error
            print '##################'
Esempio n. 50
0
def cross_net(stream, env=False, master=False):
    """
    Generate picks using a simple envelope cross-correlation.

    Picks are made for each channel based on optimal moveout defined by
    maximum cross-correlation with master trace.  Master trace will be the
    first trace in the stream if not set.  Requires good inter-station
    coherance.

    :type stream: obspy.core.stream.Stream
    :param stream: Stream to pick
    :type env: bool
    :param env: To compute cross-correlations on the envelope or not.
    :type master: obspy.core.trace.Trace
    :param master:
        Trace to use as master, if False, will use the first trace in stream.

    :returns: :class:`obspy.core.event.event.Event`

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.picker import cross_net
    >>> st = read()
    >>> event = cross_net(st, env=True)
    >>> print(event.creation_info.author)
    EQcorrscan

    .. warning::
        This routine is not designed for accurate picking, rather it can be
        used for a first-pass at picks to obtain simple locations. Based on
        the waveform-envelope cross-correlation method.
    """
    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='cross_net'))
    samp_rate = stream[0].stats.sampling_rate
    if not env:
        Logger.info('Using the raw data')
        st = stream.copy()
        st.resample(samp_rate)
    else:
        st = stream.copy()
        Logger.info('Computing envelope')
        for tr in st:
            tr.resample(samp_rate)
            tr.data = envelope(tr.data)
    if not master:
        master = st[0]
    master.data = np.nan_to_num(master.data)
    for i, tr in enumerate(st):
        tr.data = np.nan_to_num(tr.data)
        Logger.debug('Comparing {0} with the master'.format(tr.id))
        shift_len = int(0.3 * len(tr))
        Logger.debug('Shift length is set to ' + str(shift_len) + ' samples')
        index, cc = xcorr(master, tr, shift_len)
        wav_id = WaveformStreamID(station_code=tr.stats.station,
                                  channel_code=tr.stats.channel,
                                  network_code=tr.stats.network)
        event.picks.append(
            Pick(time=tr.stats.starttime + (index / tr.stats.sampling_rate),
                 waveform_id=wav_id,
                 phase_hint='S',
                 onset='emergent'))
        Logger.debug(event.picks[i])
    event.origins[0].time = min([pick.time for pick in event.picks]) - 1
    # event.origins[0].latitude = float('nan')
    # event.origins[0].longitude = float('nan')
    # Set arbitrary origin time
    del st
    return event
Esempio n. 51
0

import matplotlib.pyplot as plt
from merge_single import merge_single
from numpy import sign
import numpy as np
from obspy.signal.cross_correlation import xcorr
corr=0

ax = plt.subplot(111)
time_vector = np.linspace(-50.0,50.0,50001)
for k in range(1048728,1048840,4):
 end = k + 4
 print(end)
 tr1=merge_single(6,k,end)
 tr2=merge_single(7,k,end)
 tr1.detrend('linear')
 tr2.detrend('linear')
 tr1.filter('bandpass', freqmin=0.1, freqmax=2, corners=2, zerophase=True)
 tr2.filter('bandpass', freqmin=0.1, freqmax=2, corners=2, zerophase=True)
 tr1=sign(tr1.data)
 tr2=sign(tr2.data)

 index,value,acorr = xcorr(tr1, tr2, 25000, full_xcorr=True)
 print(acorr)
 ax.plot(time_vector,acorr/np.max(acorr) +k-1048728)
 corr+=acorr
ax.plot(time_vector,corr/np.max(corr)-4)
plt.show()
Esempio n. 52
0
def cross_net(stream, env=False, debug=0, master=False):
    r"""Function to generate picks for each channel based on optimal moveout \
    defined by maximum cross-correlation with master trace.  Master trace \
    will be the first trace in the stream.

    :type stream: :class: obspy.Stream
    :param stream: Stream to pick
    :type env: bool
    :param env: To compute cross-correlations on the envelope or not.
    :type debug: int
    :param debug: Debug level from 0-5
    :type master: obspy.Trace
    :param master: Trace to use as master, if False, will use the first trace \
            in stream.

    :returns: list of pick class
    """
    from obspy.signal.cross_correlation import xcorr
    from obspy.signal.filter import envelope
    from eqcorrscan.utils.sfile_util import PICK
    import matplotlib.pyplot as plt
    import numpy as np
    picks = []
    samp_rate = stream[0].stats.sampling_rate
    if not env:
        if debug > 2:
            print('Using the raw data')
        st = stream.copy()
        st.resample(samp_rate)
    else:
        st = stream.copy()
        if debug > 2:
            print('Computing envelope')
        for tr in st:
            tr.resample(samp_rate)
            tr.data = envelope(tr.data)
    if debug > 2:
        st.plot(equal_scale=False, size=(800, 600))
    if not master:
        master = st[0]
    master.data = np.nan_to_num(master.data)
    for tr in st:
        tr.data = np.nan_to_num(tr.data)
        if debug > 2:
            msg = ' '.join([
                'Comparing', tr.stats.station, tr.stats.channel,
                'with the master'
            ])
            print(msg)
        shift_len = int(0.3 * len(tr))
        if debug > 2:
            print('Shift length is set to ' + str(shift_len) + ' samples')
        if debug > 3:
            index, cc, cc_vec = xcorr(master, tr, shift_len, full_xcorr=True)
            cc_vec = np.nan_to_num(cc_vec)
            if debug > 4:
                print(cc_vec)
            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            x = np.linspace(0, len(master) / samp_rate, len(master))
            ax1.plot(x,
                     master.data / float(master.data.max()),
                     'k',
                     label='Master')
            ax1.plot(x + (index / samp_rate),
                     tr.data / float(tr.data.max()),
                     'r',
                     label='Slave shifted')
            ax1.legend(loc="lower right", prop={'size': "small"})
            ax1.set_xlabel("time [s]")
            ax1.set_ylabel("norm. amplitude")
            ax2 = fig.add_subplot(212)
            print(len(cc_vec))
            x = np.linspace(0, len(cc_vec) / samp_rate, len(cc_vec))
            ax2.plot(x, cc_vec, label='xcorr')
            # ax2.set_ylim(-1, 1)
            # ax2.set_xlim(0, len(master))
            plt.show()
        index, cc = xcorr(master, tr, shift_len)
        pick = PICK(station=tr.stats.station,
                    channel=tr.stats.channel,
                    impulsivity='E',
                    phase='S',
                    weight='1',
                    time=tr.stats.starttime + (index / tr.stats.sampling_rate))
        if debug > 2:
            print(pick)
        picks.append(pick)
    del st
    return picks
Esempio n. 53
0
def cross_net(stream, env=False, debug=0, master=False):
    """
    Generate picks using a simple envelope cross-correlation.
    Picks are made for each channel based on optimal moveout \
    defined by maximum cross-correlation with master trace.  Master trace \
    will be the first trace in the stream.

    :type stream: :class: obspy.Stream
    :param stream: Stream to pick
    :type env: bool
    :param env: To compute cross-correlations on the envelope or not.
    :type debug: int
    :param debug: Debug level from 0-5
    :type master: obspy.Trace
    :param master: Trace to use as master, if False, will use the first trace \
            in stream.

    :returns: obspy.core.event.Event

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.picker import cross_net
    >>> st = read()
    >>> event = cross_net(st, env=True)
    >>> event.creation_info.author
    'EQcorrscan'
    """
    from obspy.signal.cross_correlation import xcorr
    from obspy.signal.filter import envelope
    from obspy import UTCDateTime
    from obspy.core.event import Event, Pick, WaveformStreamID
    from obspy.core.event import CreationInfo, Comment, Origin
    import matplotlib.pyplot as plt
    import numpy as np

    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='cross_net'))
    samp_rate = stream[0].stats.sampling_rate
    if not env:
        if debug > 2:
            print('Using the raw data')
        st = stream.copy()
        st.resample(samp_rate)
    else:
        st = stream.copy()
        if debug > 2:
            print('Computing envelope')
        for tr in st:
            tr.resample(samp_rate)
            tr.data = envelope(tr.data)
    if debug > 2:
        st.plot(equal_scale=False, size=(800, 600))
    if not master:
        master = st[0]
    master.data = np.nan_to_num(master.data)
    for i, tr in enumerate(st):
        tr.data = np.nan_to_num(tr.data)
        if debug > 2:
            msg = ' '.join([
                'Comparing', tr.stats.station, tr.stats.channel,
                'with the master'
            ])
            print(msg)
        shift_len = int(0.3 * len(tr))
        if debug > 2:
            print('Shift length is set to ' + str(shift_len) + ' samples')
        if debug > 3:
            index, cc, cc_vec = xcorr(master, tr, shift_len, full_xcorr=True)
            cc_vec = np.nan_to_num(cc_vec)
            if debug > 4:
                print(cc_vec)
            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            x = np.linspace(0, len(master) / samp_rate, len(master))
            ax1.plot(x,
                     master.data / float(master.data.max()),
                     'k',
                     label='Master')
            ax1.plot(x + (index / samp_rate),
                     tr.data / float(tr.data.max()),
                     'r',
                     label='Slave shifted')
            ax1.legend(loc="lower right", prop={'size': "small"})
            ax1.set_xlabel("time [s]")
            ax1.set_ylabel("norm. amplitude")
            ax2 = fig.add_subplot(212)
            print(len(cc_vec))
            x = np.linspace(0, len(cc_vec) / samp_rate, len(cc_vec))
            ax2.plot(x, cc_vec, label='xcorr')
            # ax2.set_ylim(-1, 1)
            # ax2.set_xlim(0, len(master))
            plt.show()
        index, cc = xcorr(master, tr, shift_len)
        wav_id = WaveformStreamID(station_code=tr.stats.station,
                                  channel_code=tr.stats.channel,
                                  network_code=tr.stats.network)
        event.picks.append(
            Pick(time=tr.stats.starttime + (index / tr.stats.sampling_rate),
                 waveform_id=wav_id,
                 phase_hint='S',
                 onset='emergent'))
        if debug > 2:
            print(event.picks[i])
    # Set an arbitrary origin time one second before the earliest pick;
    # the hypocentre location is left undefined.
    event.origins[0].time = min([pick.time for pick in event.picks]) - 1
    event.origins[0].latitude = float('nan')
    event.origins[0].longitude = float('nan')
    del st
    return event
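A short usage sketch for the master keyword, assuming the eqcorrscan module layout given in the docstring; the obspy demo stream stands in for real data, and the choice of st[1] as master is illustrative:

from obspy import read
from eqcorrscan.utils.picker import cross_net

st = read()                      # three-component obspy demo stream
# Pick against an explicitly chosen master trace instead of st[0]
event = cross_net(st, env=True, master=st[1].copy())
for pick in event.picks:
    print(pick.waveform_id.get_seed_string(), pick.time)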
Esempio n. 54
0
def classic_xcorr(trace1, trace2, max_lag_samples):
    from obspy.signal.cross_correlation import xcorr

    # Keep only the full cross-correlation function (full_xcorr=True)
    x_corr = xcorr(trace1.data, trace2.data, max_lag_samples, True)[2]
    return x_corr
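classic_xcorr returns only the full correlation function, so the peak lag must be recovered by hand. A minimal sketch using the legacy obspy xcorr these snippets rely on; the demo stream and the 200-sample window are illustrative:

from obspy import read

st = read()                                   # obspy demo stream
max_lag = 200
cc = classic_xcorr(st[0], st[1], max_lag)
# cc has length 2 * max_lag + 1; index 0 corresponds to a lag of -max_lag
best_lag = int(cc.argmax()) - max_lag
print('peak correlation %.3f at %d samples' % (cc.max(), best_lag))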
Esempio n. 55
0
def crossc(dstart, dend, ch1, ch2, Dir):
    # here you load all the functions you need to use
    from obspy.seg2.seg2 import readSEG2
    import numpy as np
    from numpy import sign
    from obspy.signal.cross_correlation import xcorr

    ## loading the info for outfile-name
    #stream_start = readSEG2(Dir + str(dstart) + ".dat")
    #t_start = stream_start[ch1].stats.seg2.ACQUISITION_TIME
    #stream_end = readSEG2(Dir + str(dend) + ".dat")
    #t_end = stream_end[ch1].stats.seg2.ACQUISITION_TIME

    # initialization of the arrays and variables
    max_lag = 25000
    n_corr = 2 * max_lag + 1   # length of the full cross-correlation function
    corr = np.zeros(n_corr)
    nerror = 0

    # TAPER: Blackman ends, flat in the middle, same length as the correlation
    taper_percentage = 0.05
    taper = np.blackman(int(n_corr * taper_percentage))
    taper_left, taper_right = np.array_split(taper, 2)
    taper = np.concatenate([taper_left,
                            np.ones(n_corr - len(taper)),
                            taper_right])

    for k in range(dstart, dend, 4):
        start = k
        end = k + 5   # only used to merge 5 - 1 = 4 files to one stream
        r = k - dstart
        try:
            # merge_single is assumed to be defined elsewhere in this module
            st1 = merge_single(ch1, start, end, Dir)
            st2 = merge_single(ch2, start, end, Dir)
            st1.detrend('linear')
            st2.detrend('linear')

            st1.filter('lowpass', freq=24, zerophase=True, corners=8)
            st1.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
            st1.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)
            st2.filter('lowpass', freq=24, zerophase=True, corners=8)
            st2.filter('highpass', freq=0.05, zerophase=True, corners=2)  # had to be reduced from 0.1 Hz
            st2.filter('bandstop', freqmin=8, freqmax=14, corners=4, zerophase=True)

            # 1-bit normalization
            tr1 = sign(st1[0].data)
            tr2 = sign(st2[0].data)

            # cross-correlation
            index, value, acorr = xcorr(tr1, tr2, max_lag, full_xcorr=True)

            # check sanity
            if np.max(acorr) > 1:
                acorr = np.zeros(n_corr)

            corr += acorr
            print(corr)
        except Exception:
            nerror += 1
            print("%d : ERROR" % r)

    corr = corr / np.max(np.abs(corr)) * taper

    return corr
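The core of crossc is one-bit normalization followed by a long-lag cross-correlation that is stacked over many file windows. A self-contained sketch of that kernel on synthetic noise (the 500-sample delay and the lag window are illustrative):

import numpy as np
from obspy.signal.cross_correlation import xcorr

noise = np.random.randn(100000)
delay = 500
tr1 = np.sign(noise[:-delay])      # one-bit normalization: keep polarity only
tr2 = np.sign(noise[delay:])       # same noise, advanced by `delay` samples
shift, value, acorr = xcorr(tr1, tr2, 2500, full_xcorr=True)
# The correlation peak should sit near +/-delay, depending on xcorr's sign convention
print(shift, value)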
Esempio n. 56
0
def proceve(eve, sta, debug=False):

    try:
        coords = sp.get_coordinates(net + '.' + sta + '.00.LHZ',
                                    eve.origins[0].time)
    except Exception:
        return
    (dis, azi, bazi) = gps2dist_azimuth(coords['latitude'],
                                        coords['longitude'],
                                        eve.origins[0].latitude,
                                        eve.origins[0].longitude)

    # Now in km
    dis *= 1. / 1000.
    disdeg = dis * 0.0089932
    # Check for events way outside of our interested window
    if disdeg <= 50. or disdeg >= 120.:
        return
    fstring = 'Results/JUNK' + net + '_' + sta + '_' + str(eve.origins[0].time.year) + '_' + str(eve.origins[0].time.julday).zfill(3) + \
            '_' + str(eve.origins[0].time.hour).zfill(2) + '_' + str(eve.origins[0].time.minute).zfill(2) + \
            '_Results.csv'
    feve = open(fstring, 'w')
    feve.write(
        'sta, loc, year, day, f0, distance, azimuth, corr, mag, mHV, stdHV, pRV, pNV, pEV \n'
    )
    if debug:
        print('Distance: ' + str(dis))
        print('Azimuth: ' + str(azi))
    # compute arrival start and end times 620 s window
    astime = eve.origins[0].time + int(dis / 4.) - 30.
    aetime = astime + window - 30.

    # Grab the data now have trimmed RT data in velocity with filter
    locs = glob.glob('/msd/' + net + '_' + sta + '/' + str(astime.year) +
                     '/' + str(astime.julday).zfill(3) + '/*LHZ*')
    locs = [(loc.split('/')[-1]).split('_')[0] for loc in locs]
    for loc in locs:
        try:
            #if True:
            if debug:
                print('Grabbing the event data')
            st = grabdata(astime, aetime, bazi, sp, sta, net, loc)

        except Exception:
            print('No data for: ' + sta)
            continue
        if debug:
            print(st)

        for f0 in f0s:

            st2 = finalfilter(st, 0., bazi, astime, aetime, True)
            # Here is our data in the ZNE directions so no rotation
            st2ZNE = finalfilter(st, 0., 0., astime, aetime, False)
            st2 += st2ZNE
            st2.merge()

            if st2[0].stats.npts < .9 * window:
                continue
            # We now have a good event with high SNR

            #mHV, stdHV, corr = HVscheme2(st, f0, bazi, astime, aetime, disdeg, eve)

            st2 = finalfilter(st, f0, bazi, astime, aetime, True)
            corrs = []
            win = int(round(1. / f0, 0))
            for window2 in st2.slide(window_length=win,
                                     step=int(round(win / 16., 0))):
                HilbertV = np.imag(
                    hilbert(window2.select(component="Z")[0].data))
                lag, corr = xcorr(HilbertV,
                                  window2.select(component="R")[0].data,
                                  5,
                                  full_xcorr=False)
                #corr = pearsonr(HilbertV, window.select(component="R")[0].data)
                corrs.append(corr)
            corr = corrs
            HilbertV = np.imag(hilbert(st2.select(component="Z")[0].data))
            oldx = np.asarray(range(
                len(corr))) / (float(len(corr)) / float(len(HilbertV)))
            corr = np.interp(range(len(HilbertV)), oldx, corr)
            env = envelope(
                st2.select(component="R")[0].data) * envelope(HilbertV)
            HV = envelope(
                st2.select(component="R")[0].data) / envelope(HilbertV)
            env *= 1. / np.max(np.abs(env))
            t = np.asarray(range(len(HilbertV)))

            lim = t[(corr >= .90)]
            HV2 = HV[(corr >= .90)]
            lim = lim[(HV2 <= np.mean(HV2) + 3. * np.std(HV2))
                      & (HV2 >= np.mean(HV2) - 3. * np.std(HV2))]
            HV2 = HV2[(HV2 <= np.mean(HV2) + 3. * np.std(HV2))
                      & (HV2 >= np.mean(HV2) - 3. * np.std(HV2))]

            mHV = np.mean(HV2)
            stdHV = np.std(HV2)
            stdHVL = np.std(np.log10(HV2))
            try:
                #if True:
                fig = plt.figure(1, figsize=(12, 12))
                plt.subplots_adjust(hspace=0.001)
                plt.subplot(211)
                plt.title(st[0].stats.network + ' ' + st[0].stats.station +
                          ' ' + ' Period: ' + str(int(round(1. / f0, 0))) +
                          ' s Distance: ' + str(round(disdeg, 0)) + ' degrees')
                plt.plot(t,
                         HilbertV * 10**9,
                         linewidth=2.5,
                         label=' Shifted Vertical: ' + loc)
                plt.xlim((min(t), max(t)))
                plt.plot(t,
                         st2.select(component="R")[0].data * 10**9,
                         linewidth=2.5,
                         label='Radial: ' + loc)
                plt.ylabel('Velocity (nm/s)')
                plt.xticks([])
                plt.legend(loc=1)
                plt.subplot(212)
                plt.plot(t,
                         np.log10(HV),
                         label=loc + ' (H/V=' + str(round(np.log10(mHV), 2)) +
                         r'$\pm$' + str(round(stdHVL, 2)) + ')',
                         linewidth=2.5)
                #plt.plot(t, corr, label=loc + ' Characteristic Function')
                plt.ylim((-0.6, .6))
                plt.yticks([-0.3, 0., 0.3])
                plt.ylabel('H/V Ratio')
                plt.axvspan(min(lim), max(lim), 0., 2., alpha=.3, color='.5')
                plt.xlim((min(t), max(t)))
                plt.xlabel('Time (s)')
                plt.legend(loc=1)
            except Exception:
                print('Problem, continuing')
            #plt.show()
            #plt.clf()

            #pV = np.fft.rfft(st2.select(component="Z")[0].data)
            #freqmin = f0/np.sqrt(2.)
            #freqmax = f0*np.sqrt(2.)
            #freqs = np.fft.rfftfreq(st2[0].stats.npts)
            #pV = pV[(freqs <= freqmax) & (freqs >= freqmin)]
            #feve.write(sta + ', ' + loc + ', ' + str(eve.origins[0].time.year) +', ' +
            #str(eve.origins[0].time.julday).zfill(3) + ', ' + str(f0) + ', ' +
            #str(disdeg) + ', ' + str(azi) + ', ' + str(eve.magnitudes[0].mag)
            #+ ', ' + str(mHV) + ', ' + str(stdHV))
            #for comp in ['R', 'N', 'E']:
            #pC = np.fft.rfft(st2.select(component=comp)[0].data)
            #pC = pC[(freqs <= freqmax) & (freqs >= freqmin)]
            #pCV = np.mean(np.abs(pC)/np.abs(pV))
            #feve.write( ', ' + str(pCV) )
            #feve.write(' \n')
    plt.savefig('BOTHPLT_' + st[0].stats.network + '_' + st[0].stats.station + '_' + st[0].stats.location + '_' + str(eve.origins[0].time.year) + '_' + str(eve.origins[0].time.julday).zfill(3) + \
            '_' + str(eve.origins[0].time.hour).zfill(2) + '_' + str(eve.origins[0].time.minute).zfill(2) + '_' + str(int(round(1./f0,0))) + '.pdf', format='PDF', dpi=400)
    plt.clf()
    feve.close()
    num_lines = sum(1 for line in open(fstring))
    if num_lines <= 1:
        os.remove(fstring)
    return
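The heart of proceve is the Rayleigh-wave test: the vertical component is phase-shifted 90 degrees by taking the imaginary part of its analytic signal, after which elliptical Rayleigh motion makes it strongly correlated with the radial component. A minimal sketch of that step on synthetic sinusoids, with frequency and trace names purely illustrative:

import numpy as np
from scipy.signal import hilbert
from obspy.signal.cross_correlation import xcorr

t = np.linspace(0., 100., 2001)
vertical = np.sin(2. * np.pi * 0.05 * t)   # synthetic Rayleigh vertical
radial = np.cos(2. * np.pi * 0.05 * t)     # radial 90 degrees out of phase
shifted_v = np.imag(hilbert(vertical))     # 90-degree phase shift of the vertical
lag, corr = xcorr(shifted_v, radial, 5)
print(lag, corr)                           # |corr| near 1 for elliptical particle motion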
Esempio n. 57
0
def match_synth(sfile, cont_base, freqmin=2.0, freqmax=10.0, samp_rate=100.0,\
                threshold=8.0, threshold_type='MAD', trig_int=6.0, plotvar=True,\
                save_template=True):
    """
    Function to generate a basic synthetic from a real event, given by an s-file
    and cross-correlate this with the day of continuous data including the event

    :type sfile: str
    :param sfile: Path to the s-file for the event
    :type cont_base: str
    :param cont_base: Path to the continuous data, should be in Yyyyy/Rjjj.01\
                directories
    :type freqmin: float
    :param freqmin: Low-cut for bandpass in Hz, defaults to 2.0
    :type freqmax: float
    :param freqmax: High-cut for bandpass in Hz, defaults to 10.0
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz, defaults to 100.0
    :type threshold: float
    :param threshold: Threshold for detection in cccsum, defaults to 8.0
    :type threshold_type: str
    :param threshold_type: Type to threshold, either MAD or ABS, defaults to MAD
    :type trig_int: float
    :param trig_int: Trigger interval in seconds, defaults to 6.0
    :type plotvar: bool
    :param plotvar: Whether to plot, defaults to True
    :type save_template: bool
    :param save_template: Whether to save the real and synthetic templates, defaults to True

    :returns: detections
    """
    # import matplotlib.pyplot as plt
    from eqcorrscan.core import match_filter, template_gen
    from eqcorrscan.utils import Sfile_util, pre_processing
    import glob
    from obspy import read, Stream, UTCDateTime
    from obspy.signal.cross_correlation import xcorr
    from joblib import Parallel, delayed
    from multiprocessing import cpu_count
    import numpy as np
    # Generate the synthetic
    synth_template=synth_from_sfile(sfile, samp_rate, length=1.0,\
                                    PS_ratio=1.68)
    synth_template.filter('bandpass', freqmin=freqmin, freqmax=freqmax)
    for tr in synth_template:
        tr.data=(tr.data*1000).astype(np.int32)
    # Find the date from the sfile
    event_date=Sfile_util.readheader(sfile).time.datetime
    day=UTCDateTime(event_date.date())
    # Work out which stations we have template info for
    stachans=[(tr.stats.station, tr.stats.channel) for tr in synth_template]
    # Read in the day of data
    for stachan in stachans:
        wavfile=glob.glob(cont_base+event_date.strftime('/Y%Y/R%j.01/')+\
                            stachan[0]+'.*.'+stachan[1][0]+'?'+stachan[1][-1]+\
                            '.'+event_date.strftime('%Y.%j'))
        if len(wavfile) != 0:
            for wavf in wavfile:
                if 'st' not in locals():
                    st=read(wavf)
                else:
                    st+=read(wavf)
    st=st.merge(fill_value='interpolate')
    cores=cpu_count()
    if len(st) < cores:
        jobs=len(st)
    else:
        jobs=cores
    st=Parallel(n_jobs=jobs)(delayed(pre_processing.dayproc)(tr, freqmin,\
                                                             freqmax, 3,\
                                                             samp_rate, 0,\
                                                             day)
                            for tr in st)
    st=Stream(st)
    # Make the real template
    picks=Sfile_util.readpicks(sfile)
    real_template=template_gen._template_gen(picks, st, 1.0, 'all',\
                                            prepick=10/samp_rate)
    for tr in real_template:
        tr.data=tr.data.astype(np.int32)
    if save_template:
        real_template.write('Real_'+sfile.split('/')[-1], format='MSEED',\
                            encoding='STEIM2')
    # Shift the synthetic to better align with the real one
    for tr in real_template:
        synth_tr=synth_template.select(station=tr.stats.station,\
                                        channel=tr.stats.channel)[0]
        shift, corr = xcorr(tr.data, synth_tr.data, 20)
        print(tr.stats.station+'.'+tr.stats.channel +
              ' shift='+str(shift)+' samples, corr='+str(corr))
        if corr < 0:
            synth_tr.data*=-1
        # Apply a pad
        pad=np.zeros(abs(shift))
        if shift < 0:
            synth_tr.data=np.append(synth_tr.data, pad)[abs(shift):]
        elif shift > 0:
            synth_tr.data=np.append(pad, synth_tr.data)[0:-shift]
    if save_template:
        synth_template.write('Synthetic_'+sfile.split('/')[-1],
                            format='MSEED', encoding='STEIM2')
    # Now we have processed data and a template, we can try and detect!
    detections=match_filter.match_filter(['Synthetic_'+sfile.split('/')[-1],
                                        'Real_'+sfile.split('/')[-1]],\
                                        [synth_template, real_template],\
                                        st, threshold, \
                                        threshold_type, trig_int,\
                                        plotvar, 'synth_temp')
    f=open('Synthetic_test.csv', 'w')
    f.write('template, detect-time, cccsum, threshold, number of channels\n')
    for detection in detections:
        # output detections to file
        f.write(detection.template_name+', '+str(detection.detect_time) +
                ', '+str(detection.detect_val)+', '+str(detection.threshold) +
                ', '+str(detection.no_chans)+'\n')
        print('template: '+detection.template_name+' detection at: ' +
              str(detection.detect_time)+' with a cccsum of: ' +
              str(detection.detect_val))
    if detections:
        f.write('\n')
    f.close()
    return detections
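A hedged usage sketch; the s-file name and continuous-data base directory are placeholders for a Seisan-style archive laid out as the docstring describes:

detections = match_synth('REA/2015-01/01-0123-45L.S201501', '/data/continuous',
                         freqmin=2.0, freqmax=10.0, samp_rate=100.0,
                         threshold=8.0, threshold_type='MAD', trig_int=6.0,
                         plotvar=False, save_template=True)
for d in detections:
    print(d.detect_time, d.detect_val)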
Esempio n. 58
0
def stretching(signalRef, signalStr, epsilons, timevec, starttime=None, endtime=None):
	"""
	Calculates the stretching factor eps. This is the factor with which a signal (signalStr)
	must be stretched to get the highest correlation with a reference signal (signalRef).
	The factor eps is chosen from an array epsilons. The time vector of both signals is timevec.
	If starttime and endtime for a time window are provided, eps is calcutlated for the positive
	and negative time window as well for both time windows together. Starttime and endtime refer
	to the positive time window; from that a negative time window is calculated
	(e.g. starttime = 20.0, endtime = 50.0 --> -20.0 and -50.0 for the negative window).
	If no starttime and endtime are given, eps is computed for the whole data.
	"""

   
	if starttime is not None and endtime is not None: # eps for time windows

		if endtime > timevec[-1]:
			raise ValueError('Time window exceeds bound of time vector!')
		if starttime < 0.0:
			raise ValueError('Positive and negative time window are overlapping!')
		if starttime > endtime:
			raise ValueError('Starttime must be smaller than endtime!')	

		# indices of starttime and endtime of the time windows

		pos_t1 = np.abs(timevec-starttime).argmin()
		pos_t2 = np.abs(timevec-endtime).argmin()

		
		# taper the time windows
		pos_time = timevec[pos_t1:(pos_t2+1)]
		pos_taper_percentage = 0.1
		pos_taper = np.blackman(int(len(pos_time) * pos_taper_percentage))
		pos_taper_left, pos_taper_right = np.array_split(pos_taper, 2)
		pos_taper = np.concatenate([pos_taper_left, np.ones(len(pos_time)-len(pos_taper)), pos_taper_right])


		pos_signalRef = pos_taper * signalRef[pos_t1:(pos_t2+1)]
		pos_signalStr = pos_taper * signalStr[pos_t1:(pos_t2+1)]

		
		# calculate the correlation coefficient CC for each epsilon
		posCC = []

		for i in range(len(epsilons)):
			# positive time window
			pos_time_new = (1.0-epsilons[i])*pos_time
			pos_s = InterpolatedUnivariateSpline(pos_time_new, pos_signalStr)
			pos_stretch = pos_s(pos_time)
			pos_coeffs = xcorr(pos_stretch,pos_signalRef,0)
			posCC.append(abs(pos_coeffs[1]))

			
		# determine the max. CC and corresponding epsilon
		posmaxCC = max(posCC)
		posindex = posCC.index(posmaxCC)
		poseps = epsilons[posindex]


		
	
		# decomment for showing plot of signal, reference signal and stretched signal in positive timewindow
		# pos_time_eps = (1.0-poseps)*pos_time
		# s_poseps = InterpolatedUnivariateSpline(pos_time_eps, pos_signalStr)
		# stretch_poseps = s_poseps(pos_time)

		# plt.plot(pos_time, pos_signalStr, 'r')
		# plt.plot(pos_time, pos_signalRef,'b')
		# plt.plot(pos_time, stretch_poseps,'g')
		# plt.xlabel('seconds')
		# plt.title('Comparison of CCFs')
		# plt.legend(('signal', 'reference signal', 'stretched signal'))
		# plt.show()



		# decomment for showing plot of signal, reference signal and stretched signal in both timewindows
		# both_time_eps = (1.0-botheps)*both_time
		# s_botheps = InterpolatedUnivariateSpline(both_time_eps, both_signalStr)
		# stretch_botheps = s_botheps(both_time)

		# plt.plot(both_time, both_signalStr, 'r')
		# plt.plot(both_time, both_signalRef,'b')
		# plt.plot(both_time, stretch_botheps,'g')
		# plt.xlabel('seconds')
		# plt.title('Comparison of CCFs')
		# plt.legend(('signal', 'reference signal', 'stretched signal'))
		# plt.show()

		return poseps, posmaxCC
	
	elif (starttime is None) != (endtime is None):
		raise SyntaxError('Both starttime and endtime must be given!')


	else: # eps for whole data
		counter=0
		# taper the signal and the reference
		taper_percentage = 0.1
		taper = np.blackman(int(len(timevec) * taper_percentage))
		taper_left, taper_right = np.array_split(taper, 2)
		taper = np.concatenate([taper_left, np.ones(len(timevec)-len(taper)), taper_right])

		signalStr = signalStr * taper
		signalRef = signalRef * taper
		
		# calculate the correlation coefficient CC for each epsilon
		CC = []
		for i in range(len(epsilons)):
			time_new = (1.0-epsilons[i])*timevec
			s = InterpolatedUnivariateSpline(time_new, signalStr)
			stretch = s(timevec)
			coeffs = xcorr(stretch,signalRef,0)
			CC.append(abs(coeffs[1]))
			counter+=1
			#print (counter)
		
		# determine the max. CC and corresponding epsilon
		maxCC = max(CC)
		index = CC.index(maxCC)
		eps = epsilons[index]
		
		## decomment for showing plot of signal, reference signal and stretched signal
		#time_eps = (1.0-eps)*timevec
		#s_eps = InterpolatedUnivariateSpline(time_eps, signalStr)
		#stretch_eps = s_eps(timevec)

		##plot of the signal, reference signal and the stretched signal
		#plt.plot(timevec, signalStr, 'r')
		#plt.plot(timevec, signalRef,'b')
		#plt.plot(timevec, stretch_eps,'g')
		#plt.xlabel('seconds')
		#plt.title('Comparison of CCFs')
		#plt.legend(('signal', 'reference signal', 'stretched signal'))
		#plt.show()
	
		return eps, maxCC
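A self-contained check of stretching on synthetic data: the current signal is the reference played back about 1% slower, so the recovered eps should land near 0.01. Signal shape and epsilon grid are illustrative, and scipy's InterpolatedUnivariateSpline plus obspy's xcorr are assumed to be imported at module level, as the function body requires:

import numpy as np

timevec = np.linspace(0., 50., 2001)
signalRef = np.exp(-0.05 * timevec) * np.sin(2. * np.pi * 0.4 * timevec)
signalStr = np.exp(-0.05 * 0.99 * timevec) * np.sin(2. * np.pi * 0.4 * 0.99 * timevec)
epsilons = np.linspace(-0.03, 0.03, 121)
eps, maxCC = stretching(signalRef, signalStr, epsilons, timevec)
print(eps, maxCC)   # eps should come out close to 0.01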
Esempio n. 59
0
def correlatePhaseShift(gr,st,args):

    # Correlation (Zcor) for all components together and for each component
    # T --> TSS,TDS         | S1 - u1,u2
    # R --> RSS,RDS,RDD     | S2 - u3,u4,u5
    # V --> ZSS,ZDS,ZDD     | S3 - u6,u7,u8
   
    # width for time cross correlation
    wid = int(float(st[0].stats.npts)/4)


    for i in range(len(st) // 3):

          x = np.zeros((8, 2))
          t = np.zeros((2, 2))
          r = np.zeros((3, 2))
          v = np.zeros((3, 2))

          # Tangential
          a,b = xcorr(st[3*i+0], gr[10*i+0], wid)
          x[0][0]=abs(b)
          x[0][1]=a
          t[0][0]=abs(b)
          t[0][1]=a
          a,b = xcorr(st[3*i+0], gr[10*i+1], wid)
          x[1][0]=abs(b)
          x[1][1]=a
          t[1][0]=abs(b)
          t[1][1]=a
          

          # Radial
          a,b = xcorr(st[3*i+1], gr[10*i+2], wid)
          x[2][0]=abs(b)
          x[2][1]=a
          r[0][0]=abs(b)
          r[0][1]=a
          a,b = xcorr(st[3*i+1], gr[10*i+3], wid)
          x[3][0]=abs(b)
          x[3][1]=a
          r[1][0]=abs(b)
          r[1][1]=a
          a,b = xcorr(st[3*i+1], gr[10*i+4], wid)
          x[4][0]=abs(b)
          x[4][1]=a
          r[2][0]=abs(b)
          r[2][1]=a

          # Vertical
          a,b = xcorr(st[3*i+2], gr[10*i+5], wid)
          x[5][0]=abs(b)
          x[5][1]=a
          v[0][0]=abs(b)
          v[0][1]=a
          a,b = xcorr(st[3*i+2], gr[10*i+6], wid)
          x[6][0]=abs(b)
          x[6][1]=a
          v[1][0]=abs(b)
          v[1][1]=a
          a,b = xcorr(st[3*i+2], gr[10*i+7], wid)
          x[7][0]=abs(b)
          x[7][1]=a
          v[2][0]=abs(b)
          v[2][1]=a

          # sort for zcor
          X=np.array(sorted(sorted(x,key=lambda e:e[1]),key=lambda e:e[0]))
          T=np.array(sorted(sorted(t,key=lambda e:e[1]),key=lambda e:e[0]))
          R=np.array(sorted(sorted(r,key=lambda e:e[1]),key=lambda e:e[0]))
          V=np.array(sorted(sorted(v,key=lambda e:e[1]),key=lambda e:e[0]))
          Zco = X[-1][1]
          Tco = T[-1][1]
          Rco = R[-1][1]
          Vco = V[-1][1]

          # Update stats
          for l in range(0,3):
             st[3*i+l].stats.Zcor = Zco
             st[3*i+l].stats.Tcor = Tco
             st[3*i+l].stats.Rcor = Rco
             st[3*i+l].stats.Vcor = Vco
          for l in range(0,10):
             gr[10*i+l].stats.Zcor = Zco
             gr[10*i+l].stats.Tcor = Tco
             gr[10*i+l].stats.Rcor = Rco
             gr[10*i+l].stats.Vcor = Vco


    return (gr,st)
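For reference, the double sort above simply selects the lag paired with the largest |cc| (ties broken towards the larger lag). An equivalent sketch of that selection with argmax; the helper name is hypothetical:

import numpy as np

def best_lag(pairs):
    # pairs: (n, 2) array of (|cc|, lag) rows; return the lag of the maximum |cc|
    pairs = np.asarray(pairs)
    return pairs[pairs[:, 0].argmax(), 1]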