Example #1
 def test_integer_input_equals_float_input(self):
     a = [-3, 0, 4]
     b = [-3, 4]
     c = np.array(a, dtype=float)
     d = np.array(b, dtype=float)
     for demean in (True, False):
         for normalize in (None, 'naive'):
             cc1 = correlate(a,
                             b,
                             3,
                             demean=demean,
                             normalize=normalize,
                             method='direct')
             cc2 = correlate(c, d, 3, demean=demean, normalize=normalize)
             np.testing.assert_allclose(cc1, cc2)
         for normalize in (None, 'naive', 'full'):
             cc3 = correlate_template(a,
                                      b,
                                      demean=demean,
                                      normalize=normalize,
                                      method='direct')
             cc4 = correlate_template(c,
                                      d,
                                      demean=demean,
                                      normalize=normalize)
             np.testing.assert_allclose(cc3, cc4)
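All of these examples exercise ObsPy's obspy.signal.cross_correlation module. As a quick orientation, a minimal sketch of the two functions used throughout (my own illustration, not taken from any of the projects):

import numpy as np
from obspy.signal.cross_correlation import correlate, xcorr_max

a = np.array([-3.0, 0.0, 4.0])
b = np.array([-3.0, 4.0])
cc = correlate(a, b, 3)       # cross-correlation function, 2 * 3 + 1 samples
shift, value = xcorr_max(cc)  # lag of the maximum (in samples) and its value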
Example #2
    def test_shift(self):
        tr = read()[0]
        dt = tr.stats.delta
        t = tr.stats.starttime = UTC('2018-01-01T00:00:10.000000Z')
        tr2 = tr.copy()
        _downsample_and_shift(tr2)
        self.assertEqual(tr2, tr)

        tr2 = tr.copy()
        tr2.stats.starttime = t + 0.1 * dt
        _downsample_and_shift(tr2)
        self.assertEqual(tr2.stats.starttime, t)

        tr2 = tr.copy()
        tr2.stats.starttime = t - 0.1 * dt
        _downsample_and_shift(tr2)
        self.assertEqual(tr2.stats.starttime, t)

        tr2 = tr.copy()
        tr2.stats.starttime = t - 0.49 * dt
        _downsample_and_shift(tr2)
        self.assertEqual(tr2.stats.starttime, t)

        tr2 = tr.copy()
        tr2.stats.starttime = t - 0.0001 * dt
        _downsample_and_shift(tr2)
        self.assertEqual(tr2.stats.starttime, t)

        # shift cumulatively by +1 sample
        tr2 = tr.copy()
        tr2.stats.starttime += 0.3 * dt
        _downsample_and_shift(tr2)
        tr2.stats.starttime += 0.3 * dt
        _downsample_and_shift(tr2)
        tr2.stats.starttime += 0.4 * dt
        _downsample_and_shift(tr2)
        self.assertEqual(tr2.stats.starttime, t)
        np.testing.assert_allclose(tr2.data[201:-200], tr.data[200:-201],
                                   rtol=1e-2, atol=1)
        cc = correlate(tr2.data, tr.data, 1000)
        shift, cc_max = xcorr_max(cc)
        self.assertEqual(shift, 1)
        self.assertGreater(cc_max, 0.995)

        # shift cumulatively by -1 sample
        tr2 = tr.copy()
        tr2.stats.starttime -= 0.3 * dt
        _downsample_and_shift(tr2)
        tr2.stats.starttime -= 0.3 * dt
        _downsample_and_shift(tr2)
        tr2.stats.starttime -= 0.4 * dt
        _downsample_and_shift(tr2)
        self.assertEqual(tr2.stats.starttime, t)
        np.testing.assert_allclose(tr2.data[200:-201], tr.data[201:-200],
                                   rtol=1e-2, atol=2)
        cc = correlate(tr2.data, tr.data, 1000)
        shift, cc_max = xcorr_max(cc)
        self.assertEqual(shift, -1)
        self.assertGreater(cc_max, 0.995)
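_downsample_and_shift is project code, not part of ObsPy. A minimal sketch of the sub-sample alignment idea the test exercises, using a Fourier-domain shift (my assumption of the approach; the shift is circular, which is why the test above excludes edge samples):

import numpy as np
from obspy import read

tr = read()[0]
dt = tr.stats.delta
# fractional-sample offset of the start time from an absolute time grid
misalign = tr.stats.starttime.timestamp % dt
shift = -misalign if misalign <= 0.5 * dt else dt - misalign
# resample the band-limited signal at the shifted times (Fourier shift theorem)
freqs = np.fft.rfftfreq(tr.stats.npts, dt)
tr.data = np.fft.irfft(np.fft.rfft(tr.data) *
                       np.exp(2j * np.pi * freqs * shift), n=tr.stats.npts)
tr.stats.starttime += shift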
Example #3
 def test_correlate_deprecated_domain_keyword(self):
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always", category=ObsPyDeprecationWarning)
         a = [1, 2, 3]
         b = [1, 2]
         correlate(a, b, 5, domain='freq')
         correlate(a, b, 5, domain='time')
         self.assertEqual(len(w), 2)
Example #5
    def CC_stream(self, p_obs, p_syn, s_obs, s_syn, p_start_obs, p_start_syn):
        dt = s_obs[0].meta.delta
        misfit = np.array([])
        misfit_obs = np.array([])
        time_shift = np.array([], dtype=int)
        amplitude = np.array([])

        amp_obs = p_obs.copy()
        amp_obs.trim(p_start_obs, p_start_obs + 25)
        amp_syn = p_syn.copy()
        amp_syn.trim(p_start_syn, p_start_syn + 25)

        # S - correlations:
        for i in range(len(s_obs)):
            cc_obspy = cc.correlate(s_obs[i].data, s_syn[i].data, int(0.25*len(s_obs[i].data)))
            shift, CC_s = cc.xcorr_max(cc_obspy, abs_max=False)

            s_syn_shift_obspy = self.shift(s_syn[i].data, -shift)

            D_s = 1 - CC_s  # Decorrelation
            time_shift = np.append(time_shift, shift)
            misfit = np.append(misfit, ((CC_s - 0.95) ** 2) / (2 * (0.1) ** 2))# + np.abs(shift))

            # misfit = np.append(misfit,((CC - 0.95) ** 2) / (2 * (0.1) ** 2))

            # plt.plot(self.zero_to_nan(s_syn_shift_obspy),label='s_syn_shifted_obspy',linewidth=0.3)
            # plt.plot(self.zero_to_nan(s_syn[i]),label = 's_syn',linewidth=0.3)
            # plt.plot(self.zero_to_nan(s_obs[i]),label = 's_obs',linestyle = ":",linewidth=0.3)
            # plt.legend()
            # plt.tight_layout()
            # plt.savefig(self.save_dir + '/S_%s.pdf' % s_obs.traces[i].meta.channel)
            # plt.close()
        # P- correlation
        for i in range(len(p_obs)):
            cc_obspy = cc.correlate(p_obs[i].data, p_syn[i].data, int( 0.25*len(p_obs[i].data)))
            shift, CC_p = cc.xcorr_max(cc_obspy ,abs_max=False)
            # time = -shift * p_syn[i].meta.delta
            p_syn_shift_obspy = self.shift(p_syn[i].data, -shift)

            D_p = 1 - CC_p  # Decorrelation
            time_shift = np.append(time_shift, shift)
            misfit = np.append(misfit, ((CC_p - 0.95) ** 2) / (2 * (0.1) ** 2) )#+ np.abs(shift))

            A = (np.dot(amp_obs.traces[i],amp_syn.traces[i])/np.dot(amp_obs.traces[i],amp_obs.traces[i]))
            amplitude = np.append(amplitude,abs(A))


            # plt.plot(p_obs.traces[i].data[0:200],label='P_obs',linewidth=0.3)
            # plt.plot(p_syn.traces[i].data[0:200],label = 'p_syn',linewidth=0.3)
            # plt.legend()
            # plt.tight_layout()
            # plt.show()
            # plt.savefig(self.save_dir + '/P_%s.pdf' % s_obs.traces[i].meta.channel)
            # plt.close()
        sum_misfit = np.sum(misfit)  # note: computed but not returned; misfit is returned instead
        return misfit, time_shift, amplitude
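The recurring term ((CC - 0.95) ** 2) / (2 * 0.1 ** 2) reads as a Gaussian log-likelihood penalty on the correlation coefficient, with target value 0.95 and standard deviation 0.1. Note that the decorrelation values D_s and D_p are computed but never used in the returned misfit.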
Example #6
 def test_correlate_normalize_true_false(self):
     a = read()[0].data[500:]
     b = a[10:]
     shift = 100
     cc1 = correlate(a, b, shift, normalize='naive')
     cc2 = correlate(a, b, shift, normalize=True)
     cc3 = correlate(a, b, shift, normalize=None)
     cc4 = correlate(a, b, shift, normalize=False)
     np.testing.assert_equal(cc1, cc2)
     np.testing.assert_equal(cc3, cc4)
Example #8
    def L2_stream(self, p_obs, p_syn, s_obs, s_syn, or_time, var):
        dt = s_obs[0].meta.delta
        misfit = np.array([])
        time_shift = np.array([], dtype=int)
        # S - correlations:
        for i in range(len(s_obs)):
            cc_obspy = cc.correlate(s_obs[i].data, s_syn[i].data, int(0.25* len(s_obs[i].data)))
            shift, CC_s = cc.xcorr_max(cc_obspy)

            s_syn_shift = self.shift(s_syn[i].data, -shift)
            time_shift = np.append(time_shift, shift)

            # d_obs_mean = np.mean(s_obs[i].data)
            # var_array = var * d_obs_mean

            var_array = np.var(s_obs[i].data)
            # var_array = var**2

            misfit = np.append(misfit, np.matmul((s_obs[i].data - s_syn_shift).T, (s_obs[i].data - s_syn_shift)) / (
                2 * (var_array)))
            # time = -time_shift * dt  # Relatively, the s_wave arrives now time later or earlier than it originally did

            # plt.plot(s_syn_shift,label='s_shifted',linewidth=0.3)
            # plt.plot(s_syn[i], label='s_syn',linewidth=0.3)
            # plt.plot(s_obs[i], label='s_obs',linewidth=0.3)
            # plt.legend()
            # plt.savefig()
            # plt.close()
        # P- correlation
        for i in range(len(p_obs)):
            cc_obspy = cc.correlate(p_obs[i].data, p_syn[i].data, int(0.25 * len(p_obs[i].data)))
            shift, CC_p = cc.xcorr_max(cc_obspy)

            p_syn_shift = self.shift(p_syn[i].data, -shift)
            time_shift = np.append(time_shift, shift)

            # d_obs_mean = np.mean(p_obs[i].data)
            # var_array = var * d_obs_mean
            var_array = np.var(p_obs[i].data)

            misfit = np.append(misfit, np.matmul((p_obs[i].data - p_syn_shift).T, (p_obs[i].data - p_syn_shift)) / (
                2 * (var_array)))
            # time = -time_shift + len(s_obs)] * dt  # Relatively, the s_wave arrives now time later or earlier than it originally did

            # plt.plot(p_syn_shift, label='p_shifted')
            # plt.plot(p_syn[i], label='p_syn')
            # plt.plot(p_obs[i], label='p_obs')
            # plt.legend()
            # # plt.show()
            # plt.close()
        sum_misfit = np.sum(misfit)
        return sum_misfit, time_shift
Example #9
    def test_correlate_deprecated_domain_keyword(self):

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", category=ObsPyDeprecationWarning)
            a = [1, 2, 3]
            b = [1, 2]
            correlate(a, b, 5, domain='freq')
            correlate(a, b, 5, domain='time')
        # on py37, scipy 1.1.0 this also catches FutureWarning from scipy
        # internals, so we need to filter the warning messages
        domain_warn = [x for x in w if 'keyword of correlate function'
                       in str(x.message)]
        self.assertEqual(len(domain_warn), 2)
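As the warning message filtered here indicates, the domain keyword was deprecated in ObsPy 1.1 in favor of method. The equivalent modern spelling would presumably be:

from obspy.signal.cross_correlation import correlate

a, b = [1, 2, 3], [1, 2]
cc_fft = correlate(a, b, 5, method='fft')        # formerly domain='freq'
cc_direct = correlate(a, b, 5, method='direct')  # formerly domain='time'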
Example #11
def cal_waveform_similarity_deltat(tr_data, tr_sync, starttime, endtime,
                                   event_time, noise_average_energy):
    """
    Assume here we already have data and sync's traces matched.
    """
    if starttime is None or endtime is None:
        return None, None, None, None
    starttime = event_time + starttime
    endtime = event_time + endtime
    con1 = (tr_data.stats.starttime >
            starttime - PADDING) or (tr_data.stats.endtime < endtime + PADDING)
    con2 = (tr_sync.stats.starttime > starttime) or (tr_sync.stats.endtime <
                                                     endtime)
    if (con1 or con2):
        return None, None, None, None
    cc_data = tr_data.slice(starttime - PADDING, endtime + PADDING)
    cc_sync = tr_sync.slice(starttime, endtime)
    cc = correlate(cc_data, cc_sync, None, demean=False)
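    # with shift=None the midpoint of cc corresponds to zero relative shift,
    # i.e. the two windows aligned at their midpoints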

    similarity = cc[len(cc) // 2]
    max_cc_pos, max_cc = xcorr_max(cc, abs_max=False)
    delta = tr_data.stats.delta
    deltat = delta * max_cc_pos

    # calculate the snr use cc_data
    signal_average_energy = np.sum(cc_data.data**2) / len(cc_data.data)
    snr = signal_average_energy / noise_average_energy

    return similarity, deltat, max_cc, snr
Example #12
def _crosscorr(df_y1, df_y2, shift=0, **kwargs):
    """
        - correlate x and y(shifted by lag in samples)
        - several methods are implemented as detailed below
        - use: do_crosscorr
    Parameters
    ----------
            datax, datay : pandas.Series
                           have to of equal length
            shift        : int, default = 0
                           - relative shift of y , pos or neg
            method       :  str, default='pearson'
                           {‘pearson’, ‘kendall’, ‘spearman’}
                            'fft' -
    Returns
    ----------
    crosscorr : float
    """
    method = 'pearson'
    if 'method' in kwargs.keys() and kwargs['method'] is not None:
        method = kwargs['method']
    y2 = df_y2.shift(shift)
    if method == 'fft':  # uses obspy, which presumably chooses between direct and fft
        y2 = y2.values
        # abs(NaN) > 0 is False, so this drops the NaNs introduced by the
        # shift; note that it also drops samples that are exactly zero
        y1 = df_y1.values[abs(y2) > 0]
        y2 = y2[abs(y2) > 0]
        fct_cc = obsCorr.correlate(y1, y2, shift=0)
        cc = obsCorr.xcorr_max(fct_cc)[1]
    else:
        cc = df_y1.corr(y2, method=method)
    return cc
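A hypothetical usage sketch (series names and values made up for illustration):

import numpy as np
import pandas as pd

x = pd.Series(np.sin(np.linspace(0, 10, 200)))
y = pd.Series(np.sin(np.linspace(0, 10, 200) - 0.5))
print(_crosscorr(x, y, shift=10))                # pandas Pearson r
print(_crosscorr(x, y, shift=10, method='fft'))  # obspy zero-lag coefficient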
Example #13
def cc_amp(u1, u2):
    # no time shift is applied here for the misfit assessment; u1 should be
    # the observed data and u2 the synthetics
    M0_fake_max = np.max(u1) / np.max(u2)
    M0_fake_min = np.min(u1) / np.min(u2)
    M0_fake = np.max(np.abs(u1)) / np.max(np.abs(u2))
    #M0_fake=np.sqrt(np.dot(u1,u1)/np.dot(u2,u2))
    #e=np.abs(u1-M0_fake*u2)/np.sqrt(np.dot(np.abs(u1),np.abs(M0_fake*u2)))
    #e=np.abs(u1-M0_fake*u2)/np.sqrt((np.abs(u1)*np.abs(M0_fake*u2)))  ##### unstable
    #e_L1=np.mean(np.abs(e))
    #e_L2=(np.sqrt(np.sum((e*e))))/u1.size
    #e_fll=(e_L1+e_L2+np.sqrt(2*e_L1*e_L1+2*e_L2*e_L2))/4
    leng_shift = int(1 / 4 * u1.size)
    CORR = correlate(u1, u2 * M0_fake, leng_shift, normalize=True,
                     demean=True, domain='freq')
    #####keep the
    shift, value = xcorr_max(CORR, abs_max=False)
    misfit = 1 - value
    shift_threshold = 5 / 0.07
    if np.abs(shift) < shift_threshold:
        shift_misfit = 0
    else:
        shift_misfit = (np.abs(shift) - shift_threshold) / shift_threshold
    #shift_misfit=np.abs(np.log(shift_misfit))
    # here add the misfit from the errors in the amplitude
    M0_fake_abs = (np.abs(np.log(np.abs(M0_fake_max))) / 5 +
                   np.abs(np.log(np.abs(M0_fake_min))) / 5)
    e_fll = misfit + M0_fake_abs + shift_misfit
    #print(e,e_L1,e_L2,e_fll)
    return e_fll
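The threshold 5 / 0.07 (about 71 samples) presumably corresponds to an allowed shift of 5 s at a 0.07 s sampling interval; larger shifts are penalized linearly.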
Example #14
def writestats(statfile, st, chan):
    """
    calculate the correlation coefficient and lag time for the synthetic
    when compared to the observed data and write to a file.
    """
    try:
        syncomp = "MX" + chan
        datacomp = "LH" + chan
        syn = st.select(channel=syncomp)
        for tr in st.select(channel=datacomp):
            resi = "{0:.2f}".format(
                np.sum(tr.data * syn[0].data[:-1]) /
                np.sum(np.square(syn[0].data[:-1])))
            cc = correlate(tr, syn[0], 500)
            lag, corr = xcorr_max(cc)
            corr = "{0:.2f}".format(corr)
            #
            statfile.write(tr.stats.network + "," + tr.stats.station)
            statfile.write("," + tr.stats.location + "," + tr.stats.channel +
                           "," + str(resi))
            statfile.write("," + str(lag) + "," + str(corr) + ", ")
            statfile.write(str(tr.stats.starttime.month) + "/" + str(tr.stats.starttime.day) + \
                                "/" + str(tr.stats.starttime.year) + " " + str(tr.stats.starttime.hour) + ":" + \
                                str(tr.stats.starttime.minute) + ":" + str(tr.stats.starttime.second) + "\n")
    except Exception:

        print('No residual for ' + st[0].stats.station + ' ' + 'LH' + chan)
    return
Example #15
def corr_NEW(u1, u2):
    leng_shift = 0
    CORR = correlate(u1, u2, leng_shift, normalize=True, domain='freq')
    shift, value = xcorr_max(CORR)
    misfit = 1.0 - value
    return shift, misfit
Example #16
 def test_correlate_template_versus_correlate(self):
     data = read()[0].data
     template = data[400:600]
     data = data[380:620]
     xcorr1 = correlate_template(data, template, normalize='naive')
     xcorr2 = correlate(data, template, 20)
     np.testing.assert_equal(xcorr1, xcorr2)
Example #17
def write_event_results(st,
                        net,
                        stack,
                        eve,
                        not_used,
                        comp,
                        inv,
                        paramdic,
                        lat=None,
                        lon=None):
    st2 = st.select(component=comp)
    # we will make a csv file with the info for each channel for the event

    if not os.path.exists(net + '_results'):
        os.mkdir(net + '_results')
    filehand = net + '_results/Results_' + net + '_' + \
                comp + '_' + paramdic['phase'] + \
                '_' + str(eve['origins'][0]['time'].year) + \
                str(eve['origins'][0]['time'].julday) + '_' + \
                str(eve['origins'][0]['time'].hour).zfill(2) + \
                str(eve['origins'][0]['time'].minute).zfill(2)
    if lat is not None:
        filehand += '_' + str(abs(lat)) + '_' + str(abs(lon))
    filehand += '.csv'

    f = open(filehand, 'w')
    f.write(
        'ID, dis, azimuth, depth, mag, amp, shift, corr, used, ptp, snr \n')

    for idx, tr in enumerate(st2):
        f.write(tr.id + ', ')

        coors = inv.get_coordinates(tr.id[:-1] + 'Z')

        (dis, azi, bazi) = gps2dist_azimuth(coors['latitude'],
                                            coors['longitude'],
                                            eve.origins[0].latitude,
                                            eve.origins[0].longitude)
        disdeg = kilometer2degrees(dis / 1000.)
        f.write(str(disdeg) + ', ')
        f.write(str(azi) + ', ')
        f.write(str(float(eve['origins'][0]['depth']) / 1000) + ', ')
        f.write(str(eve.magnitudes[0].mag) + ', ')
        amp = np.sqrt(np.sum(tr.data**2) / np.sum(stack**2))
        f.write(str(amp) + ', ')
        cc = correlate(tr.data, stack, 20)
        shift, value = xcorr_max(cc)
        f.write(str(shift / float(tr.stats.sampling_rate)) + ', ')
        f.write(str(round(value, 5)) + ', ')
        if idx in not_used:
            f.write('Bad, ')
        else:
            f.write('Good, ')
        tr2 = tr.copy()
        tr2.trim(tr2.stats.starttime, tr2.stats.starttime + 5.)
        f.write(str(np.ptp(tr.data)) + ', ')
        f.write(str(np.ptp(tr.data) / np.ptp(tr2.data)) + '\n')
    f.close()

    return
Example #19
def comp_stack(st, comp, debug=False):
    st2 = st.select(component=comp)
    comb = combinations(range(len(st2)), 2)
    used, not_used = [], []
    results = {}
    for ele in list(comb):
        tr1 = st2[ele[0]].copy()
        tr2 = st2[ele[1]].copy()
        cc = correlate(tr1.data, tr2.data, 20)
        shift, value = xcorr_max(cc)
        if debug:
            print(shift)
            print(value)
        if value <= 0.8:
            continue
        tr2.stats.starttime -= float(shift) / tr2.stats.sampling_rate
        if 'stack' not in vars():  # first accepted pair initializes the stack
            stack = tr2.data + tr1.data
            used.append(ele[1])
            used.append(ele[0])
        elif ele[1] not in used:
            stack += tr2.data
            used.append(ele[1])
    try:
        stack /= float(len(used))
    except Exception:
        # no pair passed the correlation threshold, so no stack was built
        stack = []
    for idx in range(len(st)):
        if idx not in used:
            not_used.append(st[idx].id)
    return stack, not_used
Example #21
 def test_correlate(self):
     # simple test
     a, b = [0, 1], [20, 10]
     cc = correlate(a, b, 1, demean=False, normalize=False)
     shift, value = xcorr_max(cc)
     self.assertEqual(shift, 1)
     self.assertAlmostEqual(value, 20.)
     np.testing.assert_allclose(cc, [0., 10., 20.], atol=1e-14)
      # test symmetry and different lengths of a and b
     a, b = [0, 1, 2], [20, 10]
     cc1 = correlate(a, b, 1, demean=False, normalize=False, method='fft')
     cc2 = correlate(a, b, 1, demean=False, normalize=False,
                     method='direct')
     cc3 = correlate(b, a, 1, demean=False, normalize=False, method='fft')
     cc4 = correlate(b, a, 1, demean=False, normalize=False,
                     method='direct')
     shift1, _ = xcorr_max(cc1)
     shift2, _ = xcorr_max(cc2)
     shift3, _ = xcorr_max(cc3)
     shift4, _ = xcorr_max(cc4)
     self.assertEqual(shift1, 0.5)
     self.assertEqual(shift2, 0.5)
     self.assertEqual(shift3, -0.5)
     self.assertEqual(shift4, -0.5)
     np.testing.assert_allclose(cc1, cc2)
     np.testing.assert_allclose(cc3, cc4)
     np.testing.assert_allclose(cc1, cc3[::-1])
      # test symmetry for method='direct' and len(a) - len(b) - 2 * num > 0
     a, b = [0, 1, 2, 3, 4, 5, 6, 7], [20, 10]
     cc1 = correlate(a, b, 2, method='direct')
     cc2 = correlate(b, a, 2, method='direct')
     np.testing.assert_allclose(cc1, cc2[::-1])
Example #23
def corrNEW_amp(u1, u2):
    # this function takes advantage of the amplitude ratio to control the
    # radiation pattern
    M0_fake = np.max(np.abs(u1)) / np.max(np.abs(u2))
    M0_fake_abs = np.abs(np.log(M0_fake)) / 5
    leng_shift = int(1 / 4 * u1.size)
    CORR = correlate(u1, u2, leng_shift, normalize=True, domain='freq')
    norm = np.sum(CORR)
    shift, value = xcorr_max(CORR)
    misfit = 1.0 - value / norm + 2 * M0_fake_abs
    return shift, misfit
Example #24
def xcorr_shift(s, d, min_period):
    """
    Calculate the correlation time shift around the maximum amplitude of the
    synthetic trace with subsample accuracy.
    """
    # Estimate the shift and use it as a guideline for the subsample
    # accuracy shift (using .stats.delta assumes s and d are ObsPy traces).
    shift = int(np.ceil(min_period / s.stats.delta))
    cc = crosscorr.correlate(s, d, shift=shift)
    time_shift = (cc.argmax() - shift) * s.stats.delta
    return time_shift
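cc.argmax() only resolves the shift to the nearest sample; one common way to get the subsample accuracy promised in the docstring is parabolic interpolation around the maximum (a sketch of the standard technique, not part of the project's code):

import numpy as np

def parabolic_shift(cc, shift, delta):
    """Refine the cross-correlation peak to sub-sample precision."""
    imax = int(np.argmax(cc))
    if 0 < imax < len(cc) - 1:
        y0, y1, y2 = cc[imax - 1], cc[imax], cc[imax + 1]
        imax += 0.5 * (y0 - y2) / (y0 - 2 * y1 + y2)  # vertex of the parabola
    return (imax - shift) * delta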
Example #25
def intersta_t_v_construct(tr1,
                           tr2,
                           filttr1,
                           filttr2,
                           periods,
                           rmaxv=7,
                           rminv=2,
                           deltav=0.01,
                           shift_len=500):
    """
    Construct inter-station period-velocity matrix

    :type tr1: class:`obspy.Trace`
    :param tr1: first trace data corresponding to this station pair
    :type tr2: class:`obspy.Trace`
    :param tr2: second trace data corresponding to this station pair
    :type filttr1: `numpy.array`
    :param filttr1: isolated, normalized and filtered trace data
    :type filttr2: `numpy.array`
    :param filttr2: isolated, normalized and filtered trace data
    """
    if tr1.stats.sampling_rate != tr2.stats.sampling_rate:
        logger.error("Sampling rate of traces are different!")
        return None

    # calculate delay timescale
    npoints = int(shift_len * tr1.stats.sampling_rate)
    timescale = shift_len * np.arange(-npoints, npoints + 1) / float(npoints)

    # calculate inter-station distance
    dist, _, _ = gps2dist_azimuth(tr1.stats.sac.stla, tr1.stats.sac.stlo,
                                  tr2.stats.sac.stla, tr2.stats.sac.stlo)
    dist /= 1000.0  # convert meters to kilometers
    # timescale is 0 at the centre sample; the resulting inf velocity is
    # excluded by the mask below
    veloscale = dist / timescale

    # interested velocity scale
    intersveloscale = np.arange(rminv, rmaxv, deltav)

    vmatrix = np.zeros(shape=(len(periods), len(intersveloscale)))
    for iperiod, T0 in enumerate(periods):
        correlation = correlate(filttr1[iperiod, :],
                                filttr2[iperiod, :],
                                shift=npoints)

        # transfer period-shiftlen matrix into period-velocity matrix
        maskarray = (veloscale < rmaxv) * (veloscale > rminv)
        splvector = correlation[maskarray]
        # normalize single cross-correlation functions
        splvector /= splvector.max()
        # interpolate the cross-correlation as a function of velocity with a
        # cubic spline (splrep expects ascending x, so sort by velocity)
        order = np.argsort(veloscale[maskarray])
        tck = interpolate.splrep(veloscale[maskarray][order],
                                 splvector[order], s=0)
        vmatrix[iperiod, :] = interpolate.splev(intersveloscale, tck, der=0)
    return intersveloscale, vmatrix
Example #26
def take1DXCorr(signal1, signal2, shift=20000, plotcc=True):
    """ 
    Takes 1D cross-correlation between two signals and returns shift and correlation
    coefficient for when signals match best.
    INPUTS
    signal1 (obspy trace object) - trace 1
    signal2 (obspy trace object) - trace 2 to correlate with trace 1
    shift (int) - optional, total length of samples to shift for cross-correlation
    plotcc (boolean) - optional, set to True to visualize cross-correlation
    OUTPUT     
    best_shift (int) - index of max cross-correlation value
    maxcoeff (float) - max cross-correlation value
    """

    cross_cor = cc.correlate(signal1,
                             signal2,
                             shift,
                             demean=True,
                             normalize=True,
                             domain='time')
    best_shift, maxcoeff = cc.xcorr_max(cross_cor)
    # zero_shift_coeff = cross_cor[shift]

    if plotcc:
        plt.figure()

        plt.subplot(411)
        plt.plot(signal1)
        plt.ylabel('Signal 1')
        x1 = np.arange(0, len(signal1))

        plt.subplot(412)
        plt.plot(signal2)
        plt.ylabel('Signal 2')
        x2 = np.arange(0, len(signal2))

        plt.subplot(413)
        shift = np.median(x1) - np.median(x2)  # note: shadows the shift argument
        plt.plot(x1, signal1)
        plt.plot(x2 + shift, signal2)
        plt.ylabel('Signal comparison')

        plt.subplot(414)
        plt.plot(cross_cor)
        plt.ylabel('Cross-correlation\ncoefficient')
        xloc = plt.xticks()[0]
        xshifts = [int(x) - shift for x in xloc]
        plt.xticks(xloc, xshifts)
        plt.show()

    # return(zero_shift_coeff)
    return (best_shift, maxcoeff)
Example #27
def corel_pi(wav1, wav2, shift):
    from obspy.signal.cross_correlation import correlate

    corell = correlate(wav1,
                       wav2,
                       shift,
                       demean=True,
                       normalize='naive',
                       domain='time')
    top = corell.argmax()
    top_v = corell[top]
    top = (shift) - top
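    # note: this equals -(corell.argmax() - shift), i.e. the opposite sign of
    # the shift convention used by xcorr_max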

    return (top_v, top, corell)
Example #28
def detect_earthquakes(de_array, sampling_rate, corr_criteria=0.7,
                       verbose=False):
    """Try to remove earthquakes from waveforms by correlating with an
    exponential template. If the correlation criterion is met, the
    earthquake is 'detected'.
    :type de_array: numpy array
    :param de_array: datastream representing waveform envelope
    :type sampling_rate: float
    :param sampling_rate: sampling rate
    :type corr_criteria: float
    :param corr_criteria: threshold for detecting earthquakes, defaults to 0.7
    :rtype quakearray: np.array
    :return quakearray: de_array containing -1's for detected earthquakes
    """
    T0 = time.time()

    # set exponential template
    sampling_rate_min = int(sampling_rate * 60)
    sampling_rate_half_min = int(sampling_rate_min * (1 / 2))
    sampling_rate_one_one_half_min = int(sampling_rate_min * (3 / 2))
    x = np.linspace(0.002, 6, sampling_rate_min)
    exp_internal = -(x / 2) * 2
    exp_template = np.exp(exp_internal)

    # fill-value arrays if earthquake detected
    nan_fill = np.nan * (np.ones(sampling_rate_min))
    nan_fill_ext = np.nan * (np.ones(sampling_rate_one_one_half_min))

    quakecount = 0
    quakearray = np.array([])
    for S0 in range(0, len(de_array), sampling_rate_min):
        S1 = S0 + sampling_rate_min
        tremor_snippet = de_array[S0:S1]
        exp_correlation = correlate(a=exp_template,
                                    b=tremor_snippet,
                                    shift=len(x))

        if exp_correlation.max() > corr_criteria:
            quakecount += 1
            if S0 == 0:
                quakearray = np.append(quakearray, nan_fill)
            else:
                quakearray_new = quakearray[:S0 - sampling_rate_half_min]
                quakearray = np.append(quakearray_new, nan_fill_ext)
        else:
            quakearray = np.append(quakearray, tremor_snippet)

    if verbose:
        print("[detect_earthquakes] {} quakes".format(quakecount), end=" ")
        print(round(time.time() - T0, 2), 's')

    return quakearray
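Note that exp_internal = -(x / 2) * 2 simplifies to -x, so the template is effectively exp(-x) sampled over one minute's worth of samples.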
Example #29
def observed_first_arrival(stream):
    # cross correlate all traces and find station with largest shift
    shifts = np.zeros((len(stream), 1))
    maxAmp = np.zeros((len(stream), 1))  # note: never filled or used
    for j in range(len(stream)):
        corr = correlate(stream[0],
                         stream[j],
                         stream[0].stats.npts,
                         normalize='naive',
                         demean=False,
                         method='auto')
        shift, corrCoef = xcorr_max(corr)
        shifts[j] = shift
    stat_idx = np.argmax(shifts)
    first_stat = stream[stat_idx].stats.station
    return first_stat
Example #30
    def update_cc_deltat(self, data_virasdf, sync_virasdf):
        if (self.net_sta not in data_virasdf.get_waveforms_list()):
            return
        # we assume the delta and the event_time are the same, but the
        # starttime may be slightly different; we also have to make sure
        # net_sta exists
        data_wg = data_virasdf.get_waveforms()[self.net_sta]
        sync_wg = sync_virasdf.get_waveforms()[self.net_sta]
        data_tr = data_wg["st"].select(component=self.component)[0].copy()
        sync_tr = sync_wg["st"].select(component=self.component)[0].copy()
        # make the starttime of sync the same as data
        tolerance_time = TOLERANCE_DIFF_TIME
        time_difference = np.abs(sync_tr.stats.starttime -
                                 data_tr.stats.starttime)
        if (time_difference <= data_tr.stats.delta):
            sync_tr.stats.starttime = data_tr.stats.starttime
        elif ((time_difference <= tolerance_time)
              and (data_tr.stats.starttime <= self.left)):
            if (sync_tr.stats.starttime < data_tr.stats.starttime):
                sync_tr.trim(data_tr.stats.starttime, sync_tr.stats.endtime)
                sync_tr.stats.starttime = data_tr.stats.starttime
            else:
                data_tr.trim(sync_tr.stats.starttime, data_tr.stats.endtime)
                sync_tr.stats.starttime = data_tr.stats.starttime
        else:
            # the start time is later than the event time while self.left is
            # earlier than the start time; setting similarity, deltat and cc
            # to 0 will not influence the final result
            self.similarity = 0
            self.deltat = 0
            self.cc = 0
            return

        # cut to the window
        data_win_tr = data_tr.slice(self.left, self.right)
        data_win_tr.taper(0.05, type="hann")
        sync_win_tr = sync_tr.slice(self.left, self.right)
        sync_win_tr.taper(0.05, type="hann")
        # use data as the reference, calculate cc and deltat
        cc_all = correlate(data_win_tr,
                           sync_win_tr,
                           None,
                           demean=False,
                           normalize="naive")
        self.similarity = cc_all[len(cc_all) // 2]
        self.deltat, self.cc = xcorr_max(cc_all, abs_max=False)
        delta = data_tr.stats.delta
        self.deltat = self.deltat * delta
Example #31
    def SW_CC(self, SW_env_obs, SW_env_syn):
        # I suppose that your observed traces and synthetic traces are filtered with the same bandwidths in the same order!

        R_dict = {}
        misfit = np.array([])
        for i in range(len(SW_env_obs)):
            dt = SW_env_obs[i].meta.delta

            cc_obspy = cc.correlate(SW_env_obs[i].data, SW_env_syn[i].data, int(0.25*len(SW_env_obs[i].data)))
            shift, CC = cc.xcorr_max(cc_obspy)

            SW_syn_shift_obspy = self.shift(SW_env_syn[i].data, -shift)

            D = 1 - CC  # Decorrelation
            misfit = np.append(misfit, ((CC - 0.95) ** 2) / (2 * (0.1) ** 2) + np.abs(shift))
            R_dict.update({'%s' % SW_env_obs.traces[i].stats.channel: {'misfit': misfit[i], 'time_shift': shift}})
        sum_misfit = np.sum(misfit)
        return sum_misfit
Example #32
 def test_xcorr_vs_old_implementation(self):
     """
     Test against output of xcorr from ObsPy<1.1
     """
     # Results of xcorr(self.a, self.b, 15, full_xcorr=True)
     # for ObsPy==1.0.2:
     # -5, 0.9651607597888241
     x = [0.53555336, 0.60748967, 0.67493495, 0.73707491, 0.79313226,
          0.84237607, 0.88413089, 0.91778536, 0.94280034, 0.95871645,
          0.96516076, 0.96363672, 0.95043933, 0.92590109, 0.89047807,
          0.84474328, 0.78377236, 0.71629895, 0.64316805, 0.56526677,
          0.48351386, 0.39884904, 0.31222231, 0.22458339, 0.13687123,
          0.05000401, -0.03513057, -0.11768441, -0.19685756, -0.27190599,
          -0.34214866]
     corr_fun = correlate(self.a, self.b, shift=15)
     shift, corr = xcorr_max(corr_fun)
     np.testing.assert_allclose(corr_fun, x)
     self.assertAlmostEqual(corr, 0.96516076)
     self.assertEqual(shift, -5)
Example #33
def proc_tides(ctime, net, sta, loc):
    # Helper function for windowing and calculation
    stime = ctime - 5 * 24 * 60 * 60
    etime = ctime + 5 * 24 * 60 * 60
    srate = 1
    tidetype = 'semidirunal'  # note: misspelling of 'semidiurnal' kept, as it is used as a lookup key
    comps = 'LHZ'
    st = get_syns_data(net, sta, loc, stime, etime, srate, tidetype, comps)
    fm, fM = get_fb(tidetype)
    st.filter('bandpass', freqmin=fm, freqmax=fM, zerophase=True, corners=2)
    st.sort()
    # we now have everything and can do the calculation
    #st.trim(ctime-5*24*60*60, ctime + 6*24*60*60)
    st2 = st.select(component='Z')
    cc = correlate(st2[0].data, st2[1].data, 1000)
    shift, val = xcorr_max(cc)
    ptp = np.ptp(st2[0].data)
    ptp2 = np.ptp(st2[1].data)
    return shift, val, ptp, ptp2, st2
Example #34
 def update_cc_deltat(self, data_asdf, sync_asdf):
     if (self.net_sta not in data_asdf.waveforms.list()):
         return
      # we assume the delta and the event_time are the same, but the
      # starttime may be slightly different; we also have to make sure
      # net_sta exists
     data_wg = data_asdf.waveforms[self.net_sta]
     data_tag = data_wg.get_waveform_tags()[0]
     sync_wg = sync_asdf.waveforms[self.net_sta]
     sync_tag = sync_wg.get_waveform_tags()[0]
     data_tr = data_wg[data_tag].select(component=self.component)[0].copy()
     sync_tr = sync_wg[sync_tag].select(component=self.component)[0].copy()
      # make the starttime of sync the same as data
     tolerance_time = 60
     time_difference = np.abs(sync_tr.stats.starttime -
                              data_tr.stats.starttime)
     if (time_difference <= data_tr.stats.delta):
         sync_tr.stats.starttime = data_tr.stats.starttime
     elif ((time_difference <= tolerance_time)
           and (data_tr.stats.starttime <= self.left)):
         # ! may be fixed in the future
         if (sync_tr.stats.starttime < data_tr.stats.starttime):
             sync_tr.trim(data_tr.stats.starttime, sync_tr.stats.endtime)
             sync_tr.stats.starttime = data_tr.stats.starttime
         else:
             data_tr.trim(sync_tr.stats.starttime, data_tr.stats.endtime)
             sync_tr.stats.starttime = data_tr.stats.starttime
     else:
         return
     # cut to the window
     data_win_tr = data_tr.slice(self.left, self.right)
     data_win_tr.taper(0.05, type="hann")
     sync_win_tr = sync_tr.slice(self.left, self.right)
     sync_win_tr.taper(0.05, type="hann")
     # use data as the reference, calculate cc and deltat
     cc_all = correlate(data_win_tr,
                        sync_win_tr,
                        None,
                        demean=False,
                        normalize="naive")
     self.similarity = cc_all[len(cc_all) // 2]
     self.deltat, self.cc = xcorr_max(cc_all, abs_max=False)
     delta = data_tr.stats.delta
     self.deltat = self.deltat * delta
Example #36
    def update_cc_related(self, obs, syn):
        """
        Use the trace info to update similarity, max_cc and deltat. Always use data as the reference.
        """
        # first, we have to make obs and syn comparable; assume their deltas
        # are the same.
        obs = obs.copy()
        syn = syn.copy()
        # after processing, the starttimes should be the same
        syn.stats.starttime = obs.stats.starttime

        win_obs = obs.slice(self.left, self.right)
        win_syn = syn.slice(self.left, self.right)

        cc = correlate(win_obs, win_syn, None, demean=True)
        shift, value = xcorr_max(cc, abs_max=False)

        self.similarity = cc[len(cc)//2]
        self.deltat = shift*win_obs.stats.delta
        self.max_cc = value
Example #37
def corr_coef(reference,
              current,
              shift=None,
              demean=True,
              abs_max=True,
              domain='freq'):
    """
    Return shift and value of maximum of cross-correlation.
    """
    if shift == 0:
        domain = 'time'

    fct = correlate(reference,
                    current,
                    shift=shift,
                    demean=demean,
                    normalize=True,
                    domain=domain)

    return xcorr_max(fct, abs_max=abs_max)
Example #38
 def test_correlate_extreme_shifts_for_freq_xcorr(self):
     """
     Also test shift=None
     """
     a, b = [1, 2, 3], [1, 2, 3]
     n = len(a) + len(b) - 1
     cc1 = correlate(a, b, 2, method='fft')
     cc2 = correlate(a, b, 3, method='fft')
     cc3 = correlate(a, b, None, method='fft')
     cc4 = correlate(a, b, None, method='direct')
     self.assertEqual(len(cc1), n)
     self.assertEqual(len(cc2), 2 + n)
     self.assertEqual(len(cc3), n)
     self.assertEqual(len(cc4), n)
     a, b = [1, 2, 3], [1, 2]
     n = len(a) + len(b) - 1
     cc1 = correlate(a, b, 2, method='fft')
     cc2 = correlate(a, b, 3, method='fft')
     cc3 = correlate(a, b, None, method='fft')
     cc4 = correlate(a, b, None, method='direct')
     self.assertEqual(len(cc1), n)
     self.assertEqual(len(cc2), 2 + n)
     self.assertEqual(len(cc3), n)
     self.assertEqual(len(cc4), n)
Example #39
 def test_correlate_different_length_of_signals(self):
     # Signals are aligned around the middle
     cc = correlate(self.a, self.c, 50)
     shift, _ = xcorr_max(cc)
     self.assertEqual(shift, -5 - (len(self.a) - len(self.c)) // 2)
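As the final assertion shows, correlate aligns the two inputs at their midpoints, so for signals of different length the reported lag of the maximum is offset by (len(a) - len(c)) // 2 relative to the shift between the signals' starts.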