Example #1
File: test_pac.py Project: dengemann/pacpy
def test_glm():
    """
    Test PAC function: GLM
    1. Confirm consistency of output with example data
    2. Confirm consistency of output with example data using iir filter
    3. Confirm PAC=1 when expected
    4. Confirm PAC=0 when expected
    """
    # Load data
    data = np.load(os.path.dirname(pacpy.__file__) + '/tests/exampledata.npy')
    assert np.allclose(
        glm(data, data, (13, 30), (80, 200)), 0.03191, atol=10 ** -5)
    assert np.allclose(
        glm(data, data, (13, 30), (80, 200), filterfn=butterf), 0.03476, atol=10 ** -5)

    # Test that the GLM function outputs close to 0 and 1 when expected
    lo, hi = genPAC1(glm_bias=True)
    assert glm(lo, hi, (4, 6), (90, 110)) > 0.99

    lo, hi = genPAC0()
    assert glm(lo, hi, (4, 6), (90, 110)) < 0.01
    
    # Test that Filterfn = False works as expected
    datalo = firf(data, (13,30))
    datahi = firf(data, (80,200))
    pha = np.angle(hilbert(datalo))
    amp = np.abs(hilbert(datahi))
    assert np.allclose(
        glm(pha, amp, (13, 30), (80, 200), filterfn=False),
        glm(data, data, (13, 30), (80, 200)), atol=10 ** -5)
Example #2
def Envelope(wsyn, wobs, nt, dt, eps=0.05):
    # envelope difference
    esyn = abs(hilbert(wsyn))
    eobs = abs(hilbert(wobs))
    etmp = (esyn - eobs)/(esyn + eps*esyn.max())
    wadj = etmp*wsyn - np.imag(hilbert(etmp*np.imag(hilbert(wsyn))))
    return wadj
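
A minimal usage sketch for this envelope-difference adjoint source, on hypothetical synthetic/observed waveforms (assumes only numpy as np and scipy.signal.hilbert in scope, exactly as the snippet itself does):

import numpy as np
from scipy.signal import hilbert

nt, dt = 1000, 0.01
t = np.arange(nt) * dt
# hypothetical traces: the same wavelet, shifted in time
wsyn = np.exp(-(t - 4.0) ** 2) * np.sin(2 * np.pi * 2.0 * t)
wobs = np.exp(-(t - 4.5) ** 2) * np.sin(2 * np.pi * 2.0 * t)

wadj = Envelope(wsyn, wobs, nt, dt)  # adjoint source, same length as wsyn
print(wadj.shape)  # (1000,)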
Example #3
File: test_pac.py Project: dengemann/pacpy
def test_ozkurt():
    """
    Test PAC function: Ozkurt
    1. Confirm consistency of output with example data
    2. Confirm consistency of output with example data using iir filter
    3. Confirm PAC=1 when expected
    4. Confirm PAC=0 when expected
    """
    # Load data
    data = np.load(os.path.dirname(pacpy.__file__) + '/tests/exampledata.npy')
    assert np.allclose(
        ozkurt(data, data, (13, 30), (80, 200)), 0.07548, atol=10 ** -5)
    assert np.allclose(
        ozkurt(data, data, (13, 30), (80, 200), filterfn=butterf), 0.07555, atol=10 ** -5)

    # Test that the Ozkurt PAC function outputs close to 0 and 1 when expected
    lo, hi = genPAC1(phabias=.2, fhi=300)
    hif = firf(hi, (100, 400))
    amp = np.abs(hilbert(hif))
    weight = (np.sqrt(len(amp)) * np.sqrt(np.sum(amp ** 2))) / np.sum(amp)
    assert ozkurt(lo, hi, (4, 6), (100, 400)) * weight > 0.99

    lo, hi = genPAC0()
    assert ozkurt(lo, hi, (4, 6), (90, 110)) < 0.001
    
    # Test that Filterfn = False works as expected
    datalo = firf(data, (13,30))
    datahi = firf(data, (80,200))
    pha = np.angle(hilbert(datalo))
    amp = np.abs(hilbert(datahi))
    assert np.allclose(
        ozkurt(pha, amp, (13, 30), (80, 200), filterfn=False),
        ozkurt(data, data, (13, 30), (80, 200)), atol=10 ** -5)
Example #4
File: test_pac.py Project: dengemann/pacpy
def test_mi_canolty():
    """
    Test PAC function: Canolty MI
    1. Confirm consistency of output with example data
    2. Confirm consistency of output with example data using iir filter
    3. Confirm PAC=1 when expected
    4. Confirm PAC=0 when expected
    """
    # Load data
    data = np.load(os.path.dirname(pacpy.__file__) + '/tests/exampledata.npy')
    assert np.allclose(
        mi_canolty(data, data, (13, 30), (80, 200)), 1.10063, atol=10 ** -5)
    assert np.allclose(mi_canolty(
        data, data, (13, 30), (80, 200), filterfn=butterf), 1.14300, atol=10 ** -5)

    # Test that the Canolty MI function outputs close to 0 and 1 when expected
    lo, hi = genPAC1(phabias=.2, fhi=300)
    hif = firf(hi, (100, 400))
    amp = np.abs(hilbert(hif))
    assert mi_canolty(lo, hi, (4, 6), (100, 400)) / np.mean(amp) > 0.99

    lo, hi = genPAC0()
    assert mi_canolty(lo, hi, (4, 6), (90, 110)) < 0.001
    
    # Test that Filterfn = False works as expected
    datalo = firf(data, (13,30))
    datahi = firf(data, (80,200))
    pha = np.angle(hilbert(datalo))
    amp = np.abs(hilbert(datahi))
    assert np.allclose(
        mi_canolty(pha, amp, (13, 30), (80, 200), filterfn=False),
        mi_canolty(data, data, (13, 30), (80, 200)), atol=10 ** -5)
Example #5
File: pac.py Project: dengemann/pacpy
def pa_series(lo, hi, f_lo, f_hi, fs=1000, filterfn=None, filter_kwargs=None):
    """
    Calculate the phase and amplitude time series

    Parameters
    ----------
    lo : array-like, 1d
        The low frequency time-series to use as the phase component
    hi : array-like, 1d
        The high frequency time-series to use as the amplitude component
    f_lo : (low, high), Hz
        The low frequency filtering range
    f_hi : (low, high), Hz
        The high frequency filtering range
    fs : float
        The sampling rate (default = 1000Hz)
    filterfn : function
        The filtering function, `filterfn(x, f_range, filter_kwargs)`
    filter_kwargs : dict
        Keyword parameters to pass to `filterfn(.)`

    Returns
    -------
    pha : array-like, 1d
        Time series of phase
    amp : array-like, 1d
        Time series of amplitude
        
    Usage
    -----
    >>> import numpy as np
    >>> from scipy.signal import hilbert
    >>> from pacpy.pac import pa_series
    >>> t = np.arange(0, 10, .001) # Define time array
    >>> lo = np.sin(t * 2 * np.pi * 6) # Create low frequency carrier
    >>> hi = np.sin(t * 2 * np.pi * 100) # Create modulated oscillation
    >>> hi[np.angle(hilbert(lo)) > -np.pi*.5] = 0 # Clip to 1/4 of cycle
    >>> pha, amp = pa_series(lo, hi, (4,8), (80,150))
    >>> print(pha)
    [-1.57079633 -1.53192376 -1.49301802 ..., -1.64840672 -1.6095709 -1.57079634]
    """

    # Arg check
    _x_sanity(lo, hi)
    _range_sanity(f_lo, f_hi)

    # Filter setup
    if filterfn is None:
        filterfn = firf
        filter_kwargs = {}

    # Filter
    xlo = filterfn(lo, f_lo, fs, **filter_kwargs)
    xhi = filterfn(hi, f_hi, fs, **filter_kwargs)

    # Calculate phase time series and amplitude time series
    pha = np.angle(hilbert(xlo))
    amp = np.abs(hilbert(xhi))

    return pha, amp
Example #6
File: util.py Project: billtr0n/pyorogeny
def envelope_function(x, y, z, ttime, dt):
    '''Compute envelope function based on the Hilbert transform

    Parameters
    ----------
    x: numpy.array
      time series

    y: numpy.array
      time series     

    z: numpy.array
      time series     

    ttime: float
      total time for integral

    dt: float
      time interval of time-series

    Returns
    -------
    (ex, ey, ez): (float, float, float)
      time-integrated envelope of the x, y and z components over the
      first `ttime` seconds
    '''
    sim_ind = int(np.floor(ttime/dt))  # slice index must be an integer
    analytic_x = np.absolute(sig.hilbert(x))
    analytic_y = np.absolute(sig.hilbert(y))
    analytic_z = np.absolute(sig.hilbert(z))
    return (np.sum(analytic_x[:sim_ind])*dt,
            np.sum(analytic_y[:sim_ind])*dt,
            np.sum(analytic_z[:sim_ind])*dt)
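
A hedged usage sketch for the integral above, on synthetic inputs (assumes numpy as np and scipy.signal as sig, matching the snippet's naming):

import numpy as np
import scipy.signal as sig

dt = 0.01
t = np.arange(0, 20, dt)
x = np.sin(2 * np.pi * 1.0 * t) * np.exp(-0.1 * t)
y = np.cos(2 * np.pi * 1.5 * t) * np.exp(-0.1 * t)
z = np.sin(2 * np.pi * 0.5 * t)

# time-integrated envelope of each component over the first 10 s
ex, ey, ez = envelope_function(x, y, z, ttime=10.0, dt=dt)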
Example #7
def ediff(wsyn, wobs, nt, dt, eps=0.05):
    # envelope difference
    esyn = abs(_signal.hilbert(wsyn))
    eobs = abs(_signal.hilbert(wobs))
    etmp = (esyn - eobs)/(esyn + eps*esyn.max())
    wadj = etmp*wsyn - _np.imag(_signal.hilbert(etmp*_np.imag(_signal.hilbert(wsyn))))
    return wadj
Example #8
File: measure.py Project: fmagnoni/pycmt3d
def compute_envelope_matrix_theo(dsyn, obsd, synt, dt, win_idx, taper):
    """
    Compute envelope measurements matrix H and misfit vector G theoretically
    as stated in Appendix in Qinya's paper.
    Attention: not used!!! BUG inside!!!
    """
    istart = win_idx[0]
    iend = win_idx[1]

    syn_array = synt.copy()
    obs_array = obsd.copy()

    syn_analytic = hilbert(taper * syn_array[istart:iend])
    syn_hilbert = np.imag(syn_analytic)
    syn_env = np.abs(syn_analytic)
    dsyn_hilbert = np.imag(hilbert(dsyn))
    env_derivss = \
        syn_env ** (-0.5) * (syn_array[istart:iend] * dsyn +
                             syn_hilbert * dsyn_hilbert)

    A1 = np.dot(env_derivss, env_derivss.transpose()) * dt
    b1 = np.sum(
        (np.abs(hilbert(taper * obs_array[istart:iend])) -
         np.abs(hilbert(taper * syn_array[istart:iend]))) *
        env_derivss * dt, axis=1)
    return A1, b1
Example #9
File: test_pac.py Project: TomDLT/pacpy
def test_plv():
    """
    Test PAC function: PLV.
    1. Confirm consistency of output with example data
    2. Confirm consistency of output with example data using firfls filter
    3. Confirm PAC=1 when expected
    4. Confirm PAC=0 when expected
    """
    # Load data
    data = np.load(os.path.dirname(pacpy.__file__) + '/tests/exampledata.npy')
    assert np.allclose(
        plv(data, data, (13, 30), (80, 200)), 0.25114, atol=10 ** -5)
    assert np.allclose(
        plv(data, data, (13, 30), (80, 200), filterfn=firfls), 0.24715, atol=10 ** -5)

    # Test that the PLV function outputs close to 0 and 1 when expected
    lo, hi = genPAC1()
    assert plv(lo, hi, (4, 6), (90, 110)) > 0.99

    lo, hi = genPAC0()
    assert plv(lo, hi, (4, 6), (90, 110)) < 0.01

    # Test that Filterfn = False works as expected
    datalo = firf(data, (13, 30))
    datahi = firf(data, (80, 200))
    datahiamp = np.abs(hilbert(datahi))
    datahiamplo = firf(datahiamp, (13, 30))
    pha1 = np.angle(hilbert(datalo))
    pha2 = np.angle(hilbert(datahiamplo))
    pha1, pha2 = _trim_edges(pha1, pha2)
    assert np.allclose(
        plv(pha1, pha2, (13, 30), (80, 200), filterfn=False),
        plv(data, data, (13, 30), (80, 200)), atol=10 ** -5)
Example #10
File: tf.py Project: jmxpearson/physutils
def phase_amplitude_coupling(fser, gser, lag=0):
    """
    Compute the product of two time series for calculation of phase-amplitude
    coupling. That is, if the analytic signal of fser is A_f exp(i \phi_f),
    then the phase-amplitude product of fser and gser is
    f * g = A_f exp(i \phi_g). Lag is the offset of f's amplitude from g's
    phase: A(t) exp(i \phi(t - lag)). This is useful for examining asymptotic
    statistics in the large-lag regime, where biases in the joint distribution
    of A_f and \phi_g are not due to phase-amplitude coupling but to the
    individual signals (cf. Canolty et al., Science, 2006, SI).
    The function returns a series of the same length, but the first and last
    lag elements are NaN.
    """
    fh = ssig.hilbert(fser)
    gh = ssig.hilbert(gser)

    Af = np.abs(fh)
    ephig = gh / np.abs(gh)

    if lag == 0:
        pac = Af * ephig
    else:
        pac = Af * np.roll(ephig, lag)
        pac[:lag] = np.nan
        pac[-lag:] = np.nan

    return pac
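
A short, hedged demo of the product above on a synthetic pair, a 6 Hz phase signal modulating an 80 Hz carrier (assumes numpy as np and scipy.signal as ssig, as in the snippet):

import numpy as np
import scipy.signal as ssig

fs = 1000.0
t = np.arange(0, 10, 1 / fs)
slow = np.sin(2 * np.pi * 6 * t)                      # phase-giving signal
fast = (1 + 0.8 * slow) * np.sin(2 * np.pi * 80 * t)  # amplitude-modulated carrier

pac = phase_amplitude_coupling(fast, slow)  # A_fast(t) * exp(i * phi_slow(t))
mi = np.abs(np.nanmean(pac))                # Canolty-style modulation index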
Example #11
def envelope(data):
    """
    Envelope of a signal.

    Computes the envelope of the given data which can be windowed or
    not. The envelope is determined by the absolute value of the analytic
    signal of the given data.

    If data are windowed the analytic signal and the envelope of each
    window is returned.

    :type data: :class:`~numpy.ndarray`
    :param data: Data to make envelope of.
    :return: **A_cpx, A_abs** - Analytic signal of input data, Envelope of
        input data.
    """
    nfft = util.next_pow_2(data.shape[-1])
    a_cpx = np.zeros((data.shape), dtype=np.complex64)
    a_abs = np.zeros((data.shape), dtype=np.float64)
    if len(data.shape) > 1:
        i = 0
        for row in data:
            a_cpx[i, :] = signal.hilbert(row, nfft)
            a_abs[i, :] = abs(signal.hilbert(row, nfft))
            i = i + 1
    else:
        a_cpx = signal.hilbert(data, nfft)
        a_abs = abs(signal.hilbert(data, nfft))
    return a_cpx, a_abs
Example #12
File: affine.py Project: jaidevd/pytftb
 def _normalize(self):
     SP1 = np.fft.fft(hilbert(self.s1), axis=0)
     SP2 = np.fft.fft(hilbert(self.s2), axis=0)
     indmin = 1 + int(np.round(self.fmin * (self.ts.shape[0] - 2)))
     indmax = 1 + int(np.round(self.fmax * (self.ts.shape[0] - 2)))
     sp1_ana = SP1[(indmin - 1):indmax]
     sp2_ana = SP2[(indmin - 1):indmax]
     return sp1_ana, sp2_ana
Example #13
    def _process(self):
        print(" - - - In Processing : Hight={}, Width={}".format(self.yarp_image.height(), self.yarp_image.width()))
        
        # Make this into a numpy array
        self._convert()

        beamedAudio = beamformer(self.matrix, rms=False)
        numSamps = beamedAudio.shape[2]

        showEnv = True

        if showEnv:
            
            oneBeam = np.zeros((1, 1, numSamps), dtype=np.float32)
            oneBeam[0][0] = beamedAudio[7][21]

            print(" - - - - Begin Hilbert.")
            analytic_signal = hilbert(oneBeam)
            amplitude_envelope = np.abs(analytic_signal)
            band_passed_amp = bandPass(amplitude_envelope, 5.0, numSamps, 48000)
            
            amp_oneBeam = oneBeam[0][0].copy()
            amp_oneBeam[amp_oneBeam < 0.0] = 0.0

            # Plot.
            self.fig.clear()

            ax0 = self.fig.add_subplot(311)
            ax0.set_ylim(-1, 1)
            ax0.plot(oneBeam[0][0])
            ax0.plot(amp_oneBeam)


            ax1 = self.fig.add_subplot(312)
            ax1.set_ylim(-1, 1)
            ax1.plot(analytic_signal[0][0])
            ax1.plot(amplitude_envelope[0][0])
            
            ax2 = self.fig.add_subplot(313)
            ax2.set_ylim(-1, 1)
            ax2.plot(band_passed_amp[0][0])


        else:
            print(" - - - - Begin Hilbert.")
            analytic_signal    = hilbert(beamedAudio)
            amplitude_envelope = np.abs(analytic_signal)

            print(" - - - - Begin Band Pass.")
            band_passed_amp = bandPass(amplitude_envelope, 5.0, numSamps, 48000)

            reduced_band_pass = np.sqrt(np.sum(band_passed_amp**2, axis=2)) / numSamps

            self.fig.clear()
            plt.imshow(reduced_band_pass)


        plt.pause(0.005)
Example #14
def InstantaneousPhase(wsyn, wobs, nt, dt, eps=0.05):
    # instantaneous phase 
    r = np.real(hilbert(wsyn))
    i = np.imag(hilbert(wsyn))
    phi_syn = np.arctan2(i,r)

    r = np.real(hilbert(wobs))
    i = np.imag(hilbert(wobs))
    phi_obs = np.arctan2(i,r)

    phi_rsd = phi_syn - phi_obs
    return np.sqrt(np.sum(phi_rsd*phi_rsd*dt))
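
One caveat with the residual above: phi_syn - phi_obs is not wrapped, so phase pairs straddling ±π inflate the misfit. A hedged variant that wraps the residual into (-π, π] first (the helper name is my own, not part of the source):

import numpy as np
from scipy.signal import hilbert

def instantaneous_phase_wrapped(wsyn, wobs, nt, dt):
    # np.angle already gives arctan2(imag, real) of the analytic signal
    phi_syn = np.angle(hilbert(wsyn))
    phi_obs = np.angle(hilbert(wobs))
    # wrap the residual into (-pi, pi] to avoid spurious 2*pi jumps
    phi_rsd = np.angle(np.exp(1j * (phi_syn - phi_obs)))
    return np.sqrt(np.sum(phi_rsd * phi_rsd * dt))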
Example #15
File: pyhht.py Project: 198401/pyhht
def symmetrydemo():
    a=sin(linspace(-5*pi,5*pi,10000))
    b=a+2
    c=a-0.5
    ah,bh,ch=hilbert(a),hilbert(b),hilbert(c)
    ph_a,ph_b,ph_c=unwrap(angle(ah)),unwrap(angle(bh)),unwrap(angle(ch))
    omega_a=diff(ph_a)
    omega_b=diff(ph_b)
    omega_c=diff(ph_c)
    subplot(211),plot(ph_a),plot(ph_b),plot(ph_c)
    subplot(212),plot(omega_a),plot(omega_b),plot(omega_c)
    grid()
    show()
    return a,b,c
Example #16
def _filter_ph_am(xph, xam, f_ph, f_am, sfreq, filterfn=None, kws_filt=None):
    """Aux function for phase/amplitude filtering for one pair of channels"""
    from pacpy.pac import _range_sanity
    from scipy.signal import hilbert
    filterfn = band_pass_filter if filterfn is None else filterfn
    kws_filt = {} if kws_filt is None else kws_filt

    # Filter the two signals + hilbert/phase
    _range_sanity(f_ph, f_am)
    xph = filterfn(xph, sfreq, *f_ph)
    xam = filterfn(xam, sfreq, *f_am)

    xph = np.angle(hilbert(xph))
    xam = np.abs(hilbert(xam))
    return xph, xam
Example #17
def square_envelope(corr_o,corr_s,g_speed,
    window_params):
    success = False
    env_s = corr_s.data**2 + np.imag(hilbert(corr_s.data))**2
    env_o = corr_o.data**2 + np.imag(hilbert(corr_o.data))**2
    d_env_1 =  2. * corr_s.data 
    d_env_2 =  (2. * np.imag(hilbert(corr_s.data)))

    u1 = (env_s - env_o) * d_env_1
    u2 = np.imag(hilbert((env_s - env_o) * d_env_2))

    adjt_src = u1 - u2
    
    success = True
    return adjt_src, success
Example #18
 def __init__(self,hfoObj):
     #signal = sig.detrend(hfoObj.waveform[hfoObj.start_idx:hfoObj.end_idx,0]) # detrending
     fs = hfoObj.sample_rate        
    
     signal = sig.detrend(hfoObj.waveform[3*fs//4:5*fs//4,0])  # integer slice indices
     PhaseFreqVector= np.arange(1,31,1)
     AmpFreqVector= np.arange(30,990,5)
     PhaseFreq_BandWidth=1
     AmpFreq_BandWidth=10
     Comodulogram=np.zeros((PhaseFreqVector.shape[0],AmpFreqVector.shape[0]))
     nbin=18
     position=np.zeros(nbin)
     winsize = 2*np.pi/nbin
     for j in range(nbin):
         position[j] = -np.pi+j*winsize;
     PHASES = np.zeros((PhaseFreqVector.shape[0],signal.shape[0]))
     for idx,Pf1 in enumerate(PhaseFreqVector):
         print(Pf1, end=' ')
         Pf2 = Pf1 + PhaseFreq_BandWidth
         if signal.shape[0] > 18*np.fix(fs/Pf1):
             b = sig.firwin(int(3*np.fix(fs/Pf1)),[Pf1,Pf2],pass_zero=False,window=('kaiser',0.5),nyq=fs/2)
         else:
             b = sig.firwin(int(signal.shape[0]/6),[Pf1,Pf2],pass_zero=False,window=('kaiser',0.5),nyq=fs/2)
         PhaseFreq = sig.filtfilt(b,np.array([1]),signal)
         Phase=np.angle(sig.hilbert(PhaseFreq))
         PHASES[idx,:]=Phase;
     print()
     for idx1,Af1 in enumerate(AmpFreqVector):
         print(Af1, end=' ')
         Af2 = Af1 + AmpFreq_BandWidth
         if signal.shape[0] > 18*np.fix(fs/Af1):
             b = sig.firwin(int(3*np.fix(fs/Af1)),[Af1,Af2],pass_zero=False,window=('kaiser',0.5),nyq=fs/2)
         else:
             b = sig.firwin(int(np.fix(signal.shape[0]/6)),[Af1,Af2],pass_zero=False,window=('kaiser',0.5),nyq=fs/2)
         AmpFreq = sig.filtfilt(b,np.array([1]),signal)
         Amp=np.abs(sig.hilbert(AmpFreq))
         for idx2,Pf1 in enumerate(PhaseFreqVector):
             Phase = PHASES[idx2]
             MeanAmp = np.zeros(nbin)
             for j in range(nbin):
                 bol1 = Phase < position[j]+winsize
                 bol2 = Phase >= position[j]
                 I = np.nonzero(bol1 & bol2)[0]
                 MeanAmp[j]=np.mean(Amp[I])
             #MI=(np.log(nbin)-(-np.sum((MeanAmp/np.sum(MeanAmp))*np.log((MeanAmp/np.sum(MeanAmp))))))/np.log(nbin)
             MI = (np.log(nbin)-stat.entropy(MeanAmp))/np.log(nbin)  # normalized MI, matching the commented formula above
             Comodulogram[idx2,idx1]=MI;
     plt.contourf(PhaseFreqVector+PhaseFreq_BandWidth/2,AmpFreqVector+AmpFreq_BandWidth/2,Comodulogram.T,100)
Example #19
def to_envelopes(path,num_bands,freq_lims,window_length=None,time_step=None):
    sr, proc = preproc(path,alpha=0.97)
    proc = proc/sqrt(mean(proc**2))*0.03;
    bandLo = [ freq_lims[0]*exp(log(freq_lims[1]/freq_lims[0])/num_bands)**x for x in range(num_bands)]
    bandHi = [ freq_lims[0]*exp(log(freq_lims[1]/freq_lims[0])/num_bands)**(x+1) for x in range(num_bands)]
    if window_length is not None and time_step is not None:
        use_windows = True
        nperseg = int(window_length*sr)
        noverlap = int(time_step*sr)
        window = hanning(nperseg+2)[1:nperseg+1]
        step = nperseg - noverlap
        indices = arange(0, proc.shape[-1]-nperseg+1, step)
        num_frames = len(indices)
        envelopes = zeros((num_bands,num_frames))
    else:
        use_windows=False
        sr_env = 120
        t = len(proc)/sr
        numsamp = int(ceil(t * sr_env))  # resample needs an integer sample count
        envelopes = []
    for i in range(num_bands):
        b, a = butter(2,(bandLo[i]/(sr/2),bandHi[i]/(sr/2)), btype = 'bandpass')
        env = filtfilt(b,a,proc)
        env = abs(hilbert(env))
        if use_windows:
            window_sums = []
            for k,ind in enumerate(indices):
                seg = env[ind:ind+nperseg] * window
                window_sums.append(sum(seg))
            envelopes[i,:] = window_sums
        else:
            env = resample(env,numsamp)
            envelopes.append(env)
    return array(envelopes).T
Example #20
def to_gammatone(path,num_bands,freq_lims):
    sr, proc = preproc(path,alpha=0)

    proc = proc / 32768 #hack!! for 16-bit pcm
    cfs = make_erb_cfs(freq_lims,num_bands)

    filterOrder = 4 # filter order
    gL = 2**nextpow2(0.128*sr) # gammatone filter length at least 128 ms
    b = 1.019*24.7*(4.37*cfs/1000+1) # rate of decay or bandwidth

    tpt=(2*pi)/sr
    gain=((1.019*b*tpt)**filterOrder)/6 # based on integral of impulse

    tmp_t = arange(gL)/sr

    envelopes = []
    bms = []

    # calculate impulse response
    for i in range(num_bands):
        gt = gain[i]*sr**3*tmp_t**(filterOrder-1)*exp(-2*pi*b[i]*tmp_t)*cos(2*pi*cfs[i]*tmp_t)
        bm = fftfilt(gt,proc)
        bms.append(bm)
        env = abs(hilbert(bm))
        envelopes.append(env)
    return array(bms).T,array(envelopes).T
Example #21
def HLB(signal, sr=1893.9393939393942):
    '''
    Do the Hilbert transform of the data, and returns the analytic signal,
    envelope, instantaneous phase, instantaneous frequency and analytic phase
    of the signal.
    TODO: also use the angular freq? (Hurtado Rubchinsky & Sigvardt 2004)

    Params:
    ------
        signal: array
            Signal to be analysed

    Returns:
    -------
     ana_sig: array
         analytic signal
     envelope: array
         instantaneous amplitude
     ins_phase: array
         instantaneous phase (unwrapped)
     ins_freq: array
         instantaneous frequency
     ana_phase: array
         analytic phase
    '''
    signal = np.asarray(signal)
    ana_sig = hilbert(signal)
    envelope = np.abs(ana_sig)
    ins_phase = np.unwrap(np.angle(ana_sig))
    insF = np.diff(ins_phase) * (sr/(2.0*np.pi))
    ins_freq = np.hstack((0, insF))
    ana_phase = np.arctan2(np.imag(ana_sig), np.real(ana_sig))
    return ana_sig, envelope, ins_phase, ins_freq, ana_phase
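
A hedged usage sketch for HLB on a synthetic AM tone (hypothetical parameters):

import numpy as np
from scipy.signal import hilbert

sr = 1893.9393939393942
t = np.arange(0, 2, 1 / sr)
x = (1 + 0.5 * np.sin(2 * np.pi * 1 * t)) * np.sin(2 * np.pi * 10 * t)

ana_sig, envelope, ins_phase, ins_freq, ana_phase = HLB(x, sr=sr)
# envelope traces the 1 Hz modulation; ins_freq hovers near 10 Hz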
Example #22
 def findDelayIdx(paData, fs):
     """
     find the delay value from the first few samples on A-lines
     """
     nSteps = paData.shape[1]
     refImpulse = paData[0:100, :]
     refImpulseEnv = np.abs(spsig.hilbert(refImpulse, axis=0))
     impuMax = np.amax(refImpulseEnv, axis=0)
     # to be consistent with the MATLAB's implementation ddof=1
     tempStd = np.std(refImpulseEnv, axis=0, ddof=1)
     delayIdx = -np.ones(nSteps)*18/fs
     for n in range(nSteps):
         if (impuMax[n] > 3.0*tempStd[n] and impuMax[n] > 0.1):
             tmpThresh = 2*tempStd[n]
             m1 = 14
             for ii in range(14, 50):
                 if refImpulse[ii-1, n] > -tmpThresh and\
                         refImpulse[ii, n] < -tmpThresh:
                     m1 = ii
                     break
             m2 = m1
             m3 = m1
             for ii in range(9, m1+1):
                 if refImpulse[ii-1, n] < tmpThresh and\
                         refImpulse[ii, n] > tmpThresh:
                     m2 = ii
                 if refImpulse[ii-1, n] > tmpThresh and\
                         refImpulse[ii, n] < tmpThresh:
                     m3 = ii
             delayIdx[n] = -float(m2+m3+2)/2.0/fs
     return delayIdx
Example #23
File: amfm3d.py Project: alvarouc/amfm
def qea(im):
    # scipy.signal.hilbert already returns the analytic signal im + 1j*H{im}
    H = ss.hilbert(im, axis=2)
    ia = np.abs(H)
    ip = np.angle(H)

    h1col = H[1:-1,:,:]
    h0col = H[:-2,:,:]
    h2col = H[2:,:,:]
    ifColSign = np.sign(np.real((h0col-h2col)/(2j*h1col)))
    ifCol = np.arccos((h2col+h0col)/(2*h1col))
    ifCol = (np.abs(ifCol)*ifColSign)/np.pi/2

    ifCol = np.pad(ifCol,((1,1),(0,0),(0,0)), mode='reflect')
    
    h0row = H[:,:-2,:]
    h1row = H[:,1:-1,:]
    h2row = H[:,2:,:]
    #ifxSign = np.sign(np.real((h2x-h0x)/(2j*h1x)))
    ifRow = np.arccos((h2row+h0row)/(2*h1row))
    ifRow = (np.abs(ifRow))/np.pi/2

    ifRow = np.pad(ifRow,((0,0),(1,1),(0,0)), mode='reflect')

    h0time = H[:,:,:-2]
    h1time = H[:,:,1:-1]
    h2time = H[:,:,2:]
    #ifxSign = np.sign(np.real((h2x-h0x)/(2j*h1x)))
    ifTime = np.arccos((h2time+h0time)/(2*h1time))
    ifTime = (np.abs(ifTime))/np.pi/2

    ifTime = np.pad(ifTime,((0,0),(0,0),(1,1)), mode='reflect')
    
    return(ia,ip,ifRow,ifCol,ifTime)
Example #24
def anasing(n_points, t0=None, h=0.0):
    """Lipschitz singularity.

    :param n_points: number of points in time.
    :param t0: time localization of singularity
    :param h: strength of the singularity
    :type n_points: int
    :type t0: float
    :type h: float
    :return: N-point Lipschitz singularity centered around t0
    :rtype: numpy.ndarray
    :Examples:
    >>> x = anasing(128)
    >>> plot(real(x))

    .. plot:: docstring_plots/generators/analytic_signals/anasing.py
    """
    """Refer to the wiki page on `Lipschitz condition`, good test case."""
    if t0 is None:
        t0 = n_points / 2.0
    if h <= 0:
        start, end = 1.0 / n_points, 0.5 - 1.0 / n_points
        N = int(end / start)  # linspace needs an integer sample count
        f = np.linspace(start, end, N)
        y = np.zeros((n_points // 2,), dtype=complex)  # array shape must be an integer
        y[1:n_points // 2] = (f ** (-1 - h)) * np.exp(-1j * 2 * pi * f * (t0 - 1))
        x = np.real(np.fft.ifft(y, n_points))
        x = x / x.max()
        x = x - np.sign(x.min()) * np.abs(x.min())
    else:
        t = np.arange(n_points)
        x = np.abs(t - t0) ** h
        x = x.max() - x
    x = hilbert(x)
    return x
Example #25
def amplitude_envelope(signal, fs):
    """Instantaneous amplitude of tone.
    
    .. seealso:: :func:`scipy.signal.hilbert`
    
    """
    return np.abs(hilbert(signal))
Example #26
def instantaneous_phase(signal, fs):
    """Instantaneous phase of tone.
    
    .. seealso:: :func:`scipy.signal.hilbert`
    
    """
    return np.angle(hilbert(signal))
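
The two helpers above pair naturally; a small, hedged demo on an AM tone (note fs is accepted but unused by both):

import numpy as np
from scipy.signal import hilbert

fs = 8000
t = np.arange(0, 1, 1 / fs)
tone = (1 + 0.3 * np.sin(2 * np.pi * 5 * t)) * np.sin(2 * np.pi * 440 * t)

env = amplitude_envelope(tone, fs)   # recovers the 5 Hz AM envelope
phs = instantaneous_phase(tone, fs)  # wrapped phase of the 440 Hz carrier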
Example #27
def Pmatrix(Np=5, frqs=frqs, f0=4.5e9, vf=3488.0, rs=-1j*0.03, Cs=4.07e-10, W=25.0e-6):
    ts=sqrt(1+rs**2)
    lbda0=vf/f0
    p=lbda0/4.0
    #N=4*Np-3
    #Np=int((N+3)/4.0)

    C=sqrt(2.0)*Np*W*Cs
    Pmat=array([P_one_freq(Np=Np, f=f, p=p, ts=ts, rs=rs) for f in frqs])#, dtype=complex)
    #print shape(Pmat)
    (P11, P12, P13,
     P21, P22, P23,
     P31, P32, Ga)=(Pmat[:,0], Pmat[:,1], Pmat[:,2],
                    Pmat[:,3], Pmat[:,4], Pmat[:,5],
                    Pmat[:,6], Pmat[:,7], absolute(Pmat[:,8]))
    Ba=-imag(hilbert(Ga))
    w=2*pi*frqs
    P33=Ga+1j*Ba+1j*w*C
    if 0:
        line(frqs, Ga, pl=pl)
        line(frqs, Ba, pl=pl)

    return (P11, P12, P13,
            P21, P22, P23,
            P31, P32, P33)
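
The Ba=-imag(hilbert(Ga)) step obtains the acoustic susceptance from the conductance; the two form a Hilbert-transform pair (a Kramers-Kronig-type relation). A quick standalone sanity check of that identity on the known pair cos -> sin:

import numpy as np
from scipy.signal import hilbert

n = 4096
x = np.arange(n) * (2 * np.pi * 10 / n)       # exactly 10 cycles of cos
g = np.cos(x)
b = -np.imag(hilbert(g))                      # Hilbert transform of cos is sin
print(np.allclose(b, -np.sin(x), atol=1e-8))  # True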
Example #28
File: timefreq.py Project: mschachter/LaSP
def bandpass_timefreq(s, frequencies, sample_rate):
    """
        Bandpass filter signal s at the given frequency bands, and then use the Hilbert transform
        to produce a complex-valued time-frequency representation of the bandpass filtered signal.
    """

    freqs = sorted(frequencies)
    tf_raw = np.zeros([len(frequencies), len(s)], dtype='float')
    tf_freqs = list()

    for k,f in enumerate(freqs):
        #bandpass filter signal
        if k == 0:
            tf_raw[k, :] = lowpass_filter(s, sample_rate, f)
            tf_freqs.append( (0.0, f) )
        else:
            tf_raw[k, :] = bandpass_filter(s, sample_rate,  freqs[k-1], f)
            tf_freqs.append( (freqs[k-1], f) )

    #compute analytic signal
    tf = hilbert(tf_raw, axis=1)
    #print 'tf_raw.shape=',tf_raw.shape
    #print 'tf.shape=',tf.shape

    return np.array(tf_freqs),tf_raw,tf
Example #29
File: amfm1d.py Project: ecastrow/pl2mind
def qea(im):
    """
    Quasi-eigen approximation function.

    Parameters
    ----------
    im: array_like
        1d vector that contains a time series

    Returns
    -------
    ia: array_like
        instantaneous amplitude
    ip: array_like
        instantaneous phase
    ifeq: array_like
        instantaneous frequency
    """
    im = im.ravel()
    # compute analytic signal (scipy.signal.hilbert already returns
    # im + 1j * H{im}, so it need not be combined with im again)
    H = ss.hilbert(im)
    # obtain IA and IP from analytic signal
    ia = np.abs(H)
    ip = np.angle(H)
    # obtain IF using QEA function
    h1 = H[1:-1]
    h0 = H[:-2]
    h2 = H[2:]
    ifeq = np.real(np.arccos((h2 + h0) / (2 * h1)) / np.pi / 2)
    # pad extremes copying
    ifeq = np.hstack((ifeq[:1], ifeq, ifeq[-1:]))
    return(ia, ip, ifeq)
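
A quick, hedged check of the QEA frequency estimator in isolation, on a hypothetical linear chirp (note that scipy.signal.hilbert returns the analytic signal directly):

import numpy as np
import scipy.signal as ss

fs = 1000.0
t = np.arange(0, 2, 1 / fs)
x = np.cos(2 * np.pi * (5 * t + 10 * t ** 2))  # instantaneous frequency 5 + 20 t Hz

H = ss.hilbert(x)                              # analytic signal
h0, h1, h2 = H[:-2], H[1:-1], H[2:]
ifeq = np.real(np.arccos((h2 + h0) / (2 * h1)) / np.pi / 2)  # cycles per sample
print(ifeq[999] * fs)                          # ~25 Hz, the IF at t = 1 s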
Example #30
File: noise.py Project: jaidevd/pytftb
def noisecu(n_points):
    """Compute analytic complex uniform white noise.

    :param n_points: Length of the noise signal.
    :type n_points: int
    :return: analytic complex uniform white noise signal of length N
    :rtype: numpy.ndarray
    :Examples:
    >>> import numpy as np
    >>> noise = noisecu(512)
    >>> print("%.2f" % abs((noise ** 2).mean()))
    0.00
    >>> print("%.1f" % np.std(noise) ** 2)
    1.0
    >>> subplot(211), plot(real(noise))                                              #doctest: +SKIP
    >>> subplot(212), plot(linspace(-0.5, 0.5, 512), abs(fftshift(fft(noise))) ** 2) #doctest: +SKIP

    .. plot:: docstring_plots/generators/noise/noisecu.py
    """
    if n_points <= 2:
        noise = (np.random.rand(n_points, 1) - 0.5 + 1j * (np.random.rand(n_points, 1) - 0.5)) * np.sqrt(6)
    else:
        noise = np.random.rand(2 ** int(nextpow2(n_points)),) - 0.5
        noise = hilbert(noise) / noise.std() / np.sqrt(2)
        inds = noise.shape[0] - np.arange(n_points - 1, -1, step=-1) - 1
        noise = noise[inds]
    return noise
Example #31
    def features(x: pd.Series) -> pd.DataFrame:
        feature_dict = pd.DataFrame(dtype=np.float64)
        seg_id = 1

        # lists with parameters to iterate over them
        percentiles = [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]
        hann_windows = [50, 150, 1500, 15000]
        spans = [300, 3000, 30000, 50000]
        windows = [10, 50, 100, 500, 1000, 10000]

        # basic stats
        feature_dict.loc[seg_id, 'mean'] = x.mean()
        feature_dict.loc[seg_id, 'std'] = x.std()
        feature_dict.loc[seg_id, 'max'] = x.max()
        feature_dict.loc[seg_id, 'min'] = x.min()

        # basic stats on absolute values
        feature_dict.loc[seg_id, 'mean_change_abs'] = np.mean(np.diff(x))
        feature_dict.loc[seg_id, 'abs_max'] = np.abs(x).max()
        feature_dict.loc[seg_id, 'abs_mean'] = np.abs(x).mean()
        feature_dict.loc[seg_id, 'abs_std'] = np.abs(x).std()

        # geometric and harmonic means
        feature_dict.loc[seg_id, 'hmean'] = stats.hmean(np.abs(x[np.nonzero(x)[0]]))
        feature_dict.loc[seg_id, 'gmean'] = stats.gmean(np.abs(x[np.nonzero(x)[0]]))

        # k-statistic and moments
        for i in range(1, 5):
            feature_dict.loc[seg_id, f'kstat_{i}'] = stats.kstat(x, i)
            feature_dict.loc[seg_id, f'moment_{i}'] = stats.moment(x, i)

        for i in [1, 2]:
            feature_dict.loc[seg_id, f'kstatvar_{i}'] = stats.kstatvar(x, i)

        # aggregations on various slices of data
        for agg_type, slice_length, direction in product(['std', 'min', 'max', 'mean'], [1000, 10000, 50000],
                                                         ['first', 'last']):
            if direction == 'first':
                feature_dict.loc[seg_id, f'{agg_type}_{direction}_{slice_length}'] = x[:slice_length].agg(agg_type)
            elif direction == 'last':
                feature_dict.loc[seg_id, f'{agg_type}_{direction}_{slice_length}'] = x[-slice_length:].agg(agg_type)

        feature_dict.loc[seg_id, 'max_to_min'] = x.max() / np.abs(x.min())
        feature_dict.loc[seg_id, 'max_to_min_diff'] = x.max() - np.abs(x.min())
        feature_dict.loc[seg_id, 'count_big'] = len(x[np.abs(x) > 500])
        feature_dict.loc[seg_id, 'sum'] = x.sum()

        feature_dict.loc[seg_id, 'mean_change_rate'] = calc_change_rate(x)
        # calc_change_rate on slices of data
        for slice_length, direction in product([1000, 10000, 50000], ['first', 'last']):
            if direction == 'first':
                feature_dict.loc[seg_id, f'mean_change_rate_{direction}_{slice_length}'] = calc_change_rate(
                    x[:slice_length])
            elif direction == 'last':
                feature_dict.loc[seg_id, f'mean_change_rate_{direction}_{slice_length}'] = calc_change_rate(
                    x[-slice_length:])

        # percentiles on original and absolute values
        for p in percentiles:
            feature_dict.loc[seg_id, f'percentile_{p}'] = np.percentile(x, p)
            feature_dict.loc[seg_id, f'abs_percentile_{p}'] = np.percentile(np.abs(x), p)

        feature_dict.loc[seg_id, 'trend'] = add_trend_feature(x)
        feature_dict.loc[seg_id, 'abs_trend'] = add_trend_feature(x, abs_values=True)

        feature_dict.loc[seg_id, 'mad'] = x.mad()
        feature_dict.loc[seg_id, 'kurt'] = x.kurtosis()
        feature_dict.loc[seg_id, 'skew'] = x.skew()
        feature_dict.loc[seg_id, 'med'] = x.median()

        feature_dict.loc[seg_id, 'Hilbert_mean'] = np.abs(hilbert(x)).mean()

        for hw in hann_windows:
            feature_dict.loc[seg_id, f'Hann_window_mean_{hw}'] = (
                    convolve(x, hann(hw), mode='same') / sum(hann(hw))).mean()

        feature_dict.loc[seg_id, 'classic_sta_lta1_mean'] = classic_sta_lta(x, 500, 10000).mean()
        feature_dict.loc[seg_id, 'classic_sta_lta2_mean'] = classic_sta_lta(x, 5000, 100000).mean()
        feature_dict.loc[seg_id, 'classic_sta_lta3_mean'] = classic_sta_lta(x, 3333, 6666).mean()
        feature_dict.loc[seg_id, 'classic_sta_lta4_mean'] = classic_sta_lta(x, 10000, 25000).mean()
        feature_dict.loc[seg_id, 'classic_sta_lta5_mean'] = classic_sta_lta(x, 50, 1000).mean()
        feature_dict.loc[seg_id, 'classic_sta_lta6_mean'] = classic_sta_lta(x, 100, 5000).mean()
        feature_dict.loc[seg_id, 'classic_sta_lta7_mean'] = classic_sta_lta(x, 333, 666).mean()
        feature_dict.loc[seg_id, 'classic_sta_lta8_mean'] = classic_sta_lta(x, 4000, 10000).mean()

        # exponential rolling statistics
        ewma = pd.Series.ewm
        for s in spans:
            feature_dict.loc[seg_id, f'exp_Moving_average_{s}_mean'] = (ewma(x, span=s).mean(skipna=True)).mean(
                skipna=True)
            feature_dict.loc[seg_id, f'exp_Moving_average_{s}_std'] = (ewma(x, span=s).mean(skipna=True)).std(
                skipna=True)
            feature_dict.loc[seg_id, f'exp_Moving_std_{s}_mean'] = (ewma(x, span=s).std(skipna=True)).mean(skipna=True)
            feature_dict.loc[seg_id, f'exp_Moving_std_{s}_std'] = (ewma(x, span=s).std(skipna=True)).std(skipna=True)

        feature_dict.loc[seg_id, 'iqr'] = np.subtract(*np.percentile(x, [75, 25]))
        feature_dict.loc[seg_id, 'iqr1'] = np.subtract(*np.percentile(x, [95, 5]))
        feature_dict.loc[seg_id, 'ave10'] = stats.trim_mean(x, 0.1)

        for slice_length, threshold in product([50000, 100000, 150000],
                                               [5, 10, 20, 50, 100]):
            feature_dict.loc[seg_id, f'count_big_{slice_length}_threshold_{threshold}'] = (
                    np.abs(x[-slice_length:]) > threshold).sum()
            feature_dict.loc[seg_id, f'count_big_{slice_length}_less_threshold_{threshold}'] = (
                    np.abs(x[-slice_length:]) < threshold).sum()

            # statistics on rolling windows of various sizes
        for w in windows:
            x_roll_std = x.rolling(w).std().dropna().values
            x_roll_mean = x.rolling(w).mean().dropna().values

            feature_dict.loc[seg_id, f'ave_roll_std_{w}'] = x_roll_std.mean()
            feature_dict.loc[seg_id, f'std_roll_std_{w}'] = x_roll_std.std()
            feature_dict.loc[seg_id, f'max_roll_std_{w}'] = x_roll_std.max()
            feature_dict.loc[seg_id, f'min_roll_std_{w}'] = x_roll_std.min()

            for p in percentiles:
                feature_dict.loc[seg_id, f'percentile_roll_std_{p}_window_{w}'] = np.percentile(x_roll_std, p)

            feature_dict.loc[seg_id, f'av_change_abs_roll_std_{w}'] = np.mean(np.diff(x_roll_std))
            feature_dict.loc[seg_id, f'av_change_rate_roll_std_{w}'] = np.mean(
                np.nonzero((np.diff(x_roll_std) / x_roll_std[:-1]))[0])
            feature_dict.loc[seg_id, f'abs_max_roll_std_{w}'] = np.abs(x_roll_std).max()

            feature_dict.loc[seg_id, f'ave_roll_mean_{w}'] = x_roll_mean.mean()
            feature_dict.loc[seg_id, f'std_roll_mean_{w}'] = x_roll_mean.std()
            feature_dict.loc[seg_id, f'max_roll_mean_{w}'] = x_roll_mean.max()
            feature_dict.loc[seg_id, f'min_roll_mean_{w}'] = x_roll_mean.min()

            for p in percentiles:
                feature_dict.loc[seg_id, f'percentile_roll_mean_{p}_window_{w}'] = np.percentile(x_roll_mean, p)

            feature_dict.loc[seg_id, f'av_change_abs_roll_mean_{w}'] = np.mean(np.diff(x_roll_mean))
            feature_dict.loc[seg_id, f'av_change_rate_roll_mean_{w}'] = np.mean(
                np.nonzero((np.diff(x_roll_mean) / x_roll_mean[:-1]))[0])
            feature_dict.loc[seg_id, f'abs_max_roll_mean_{w}'] = np.abs(x_roll_mean).max()

        return feature_dict
Example #32
def dtw_main(amplitudes, times, sampling_rate, path_length=1.0, length_error=.01, manual=False,
             window_size=2., prev_temp_pick=0., prev_query_pick=0., max_jump=1.5,
             manual_windowing = False, manual_guidance=True, alpha=0, plot_lags=False,
             extra_prev_query_pick=-1.0):
    
    '''
    This is messy and I need to write a proper docstring

    extra_prev_query_pick is for the dtw plot to show where the 
    previous position's pick was in the same scan if doing
    a multiscan run.
    '''

    global COORDS
    
    COORDS = []
    
    # The user picks the window for the dtw picking
    if manual_windowing:
        template_trace, template_times = manual_window_pick(amplitudes[0], times[0])
        query_trace, query_times = manual_window_pick(amplitudes[1], times[1])
    else:
        template_window = (prev_query_pick-window_size, prev_query_pick+window_size)
        query_window_centre = prev_query_pick+(prev_query_pick-prev_temp_pick)
        query_window = (query_window_centre-window_size, query_window_centre+window_size)
        
        prev_pick_index = get_index(times[0], prev_query_pick, .9e6/sampling_rate)
        template_trace, template_times = get_windowed_data(amplitudes[0], times[0],
                             *template_window, norm_factor=max(amplitudes[0]))    #Normalising by maximum in entire trace atm
        query_trace, query_times = get_windowed_data(amplitudes[1], times[1],     
                             *query_window, norm_factor=max(amplitudes[1]))       #Normalising by maximum in entire trace atm
    
    template_trace_env, query_trace_env = np.abs(hilbert(template_trace)), np.abs(hilbert(query_trace))
    
    # Get the dtw time arrays for both the waveform and envelope of the windowed traces.
    dtw_template, dtw_query = do_dtw(template_trace, query_trace, template_times, query_times, plot=manual_windowing, alpha=alpha)
    dtw_template_env, dtw_query_env = do_dtw(template_trace_env, query_trace_env, template_times, query_times, plot=False, alpha=alpha)
    
    if manual:
        looping = True
        while looping:
            dtw_plot([template_trace, template_trace_env], [query_trace, query_trace_env], template_times,
                         query_times, [dtw_template, dtw_template_env], [dtw_query, dtw_query_env],
                         manual_picking=True, template_picks=[prev_query_pick,0.,0.],query_picks=[extra_prev_query_pick,0.0,0.0])
            
            #Extract the times from the DTW picking
            if len(COORDS) > 0:
                looping = False
                tp, qp = [], []
                template_picks = list([np.append(tp,COORDS[i][1]) for i in range(len(COORDS))])
                query_picks = list([np.append(qp,COORDS[i][0]) for i in range(len(COORDS))])

                if plot_lags:
                    fig = lag_time_plot(dtw_template, dtw_query)
                if len(COORDS) > 1:
                    return template_times, query_times, template_picks, query_picks, dtw_template, dtw_query                  
            print('Please pick some arrival times on the plot')

    else:
        template_pick_index = get_index(dtw_template, prev_query_pick, 1.2e6/sampling_rate)
        if not template_pick_index:
            print('PlaceScan DTW: Template pick time not found in dtw array.')
            template_pick_index = 0
            
        template_pick, query_pick = prev_query_pick, dtw_query[template_pick_index]
        prelim_pick = (template_pick, query_pick)
        one_to_one_points = list(zip(*smoothed_gradient_av(dtw_template, dtw_query)))
        smallest_dist = np.inf
        
        if manual_guidance:
            #  Check for large dispersion in the region where picking
            window_samples = int(max_jump /1e6 * sampling_rate/2.)
            poss_t_times = dtw_template[template_pick_index-window_samples:template_pick_index+window_samples]
            poss_q_times = dtw_query[template_pick_index-window_samples:template_pick_index+window_samples]
            grads = np.asarray(np.diff(poss_t_times)/np.diff(poss_q_times))
            max_consec_infs, consec_infs = 1, 1
            for i in range(1, len(grads)):
                if (grads[i]==0. or grads[i]==np.inf) and  (grads[i-1]==0. or grads[i-1]==np.inf):
                    consec_infs += 1
                else:
                    consec_infs = 1
                if consec_infs > max_consec_infs:
                    max_consec_infs = consec_infs
            
            if max_consec_infs > .75*sampling_rate:
                print('Picking area too dispersive. Going to manual.')
                return prev_temp_pick, prev_query_pick, None, None, False
        
        
        # Snap to nearest one-to-one candidate
        one_to_one_candidates = []
        #Refine the pick, if reasonable
        for i in range(len(one_to_one_points)):
            dist = np.linalg.norm(np.array(one_to_one_points[i])-np.array(prelim_pick))
            if dist < smallest_dist and dist < max_jump:
                smallest_dist = dist
                one_to_one_candidates.append(np.array(one_to_one_points[i]))
                template_pick, query_pick = one_to_one_points[i][1], one_to_one_points[i][0]  #template_pick could be pre_query_pick
                
        template_picks, query_picks = [template_pick], [query_pick]    
    
    #Calculate and plot the velocities and errors.
    template_vel = \
               calculate_velocities(template_picks, path_length, length_error)
    query_vel = \
               calculate_velocities(query_picks, path_length, length_error)
    
    '''
    #Plot the final summary of the dtw picking process.
    template_query_stats = [np.mean(template_picks),np.min(template_picks),np.max(template_picks)]
    query_picks_stats = [np.mean(query_picks),np.min(query_picks),np.max(query_picks)]
    dtw_plot([template_trace, template_trace_env], [query_trace, query_trace_env], template_times,
             query_times, [dtw_template, dtw_template_env], [dtw_query, dtw_query_env],
             manual_picking=False, template_picks=template_query_stats, query_picks=query_picks_stats)
    '''

    if not manual and manual_guidance and np.abs(np.mean(template_picks)-np.mean(query_picks)) > max_jump:
        print('Picking off course. Going to manual.')
        return prev_temp_pick, prev_query_pick, None, None, False        
    
    return np.mean(template_picks), np.mean(query_picks), template_vel, query_vel, True
Example #33
# The RF data have been converted to 8 bits due to size limit (up to 1.5
# MB) of the zipped files in the Supplementary materials.
RFdata1 = sio.loadmat('../RFdata1.mat');
RFdata2 = sio.loadmat('../RFdata2.mat');
RF1 = RFdata1['RF1']; param1 = RFdata1['param1'][0];
RF2 = RFdata2['RF2']; param2 = RFdata2['param2'][0];
RF1 = np.double(RF1); RF1 = RF1 - np.mean(RF1);
RF2 = np.double(RF2); RF2 = RF2 - np.mean(RF2);

#-- Example #1: Nylon fibers 
migRF1 = np.zeros(RF1[:,:,0].shape, dtype = 'complex128');
for idx in np.arange(7):
    x1, z1, migRF_idx = fkmig(RF1[:,:,idx], np.double(param1['fs']), np.double(param1['pitch']), \
        TXangle = np.double(param1['TXangle'][0][:,idx]), c = np.double(param1['c']));
    migRF1 += migRF_idx/7;
im1_mig = (np.abs(hilbert(np.real(migRF1))))**0.7;
exts = (np.min(x1)-np.mean(np.diff(x1)), np.max(x1)+np.mean(np.diff(x1)), \
    np.min(z1)-np.mean(np.diff(z1)), np.max(z1)-np.mean(np.diff(z1)))
plt.title('F-K Migrated Point Targets\n7 Angles Compounded\n$(-1.5^o : 0.5^o : 1.5^o)$');
plt.imshow(np.flipud(im1_mig), cmap = 'gray', extent = exts);
plt.xticks(0.01*np.arange(-1,2));
plt.yticks(0.01*np.arange(11)); 
plt.gca().invert_yaxis()
plt.xlabel('Azimuth (m)'); plt.ylabel('Depth (m)');
plt.show();

#-- Example #2: Circular Targets
migRF2 = np.zeros(RF2[:,:,0].shape, dtype = 'complex128');
for idx in np.arange(7):
    x2, z2, migRF_idx = fkmig(RF2[:,:,idx], np.double(param2['fs']), \
        np.double(param2['pitch']), TXangle = np.double(param2['TXangle'][0][:,idx]), \
        c = np.double(param2['c']));
    migRF2 += migRF_idx/7;
Example #34
def _detect_ready_tone(w, fs):
    # get envelope of DC free signal and envelope of BP signal around freq of interest
    h = np.abs(signal.hilbert(w - np.median(w)))
    fh = np.abs(signal.hilbert(dsp.bp(w, si=1 / fs, b=FTONE * np.array([0.9, 0.95, 1.15, 1.1]))))
    dtect = _running_mean(fh / (h + 1e-3), int(fs * 0.1)) > 0.8
    return np.where(np.diff(dtect.astype(int)) == 1)[0]
Example #35
def extract_envelope_single_channel(channel, multithreading=False):
    return np.abs(scignal.hilbert(np.real(channel)))
Example #36
def compute_vertical_snr(src_stream):
    """Compute the SNR of the Z component (Z before deconvolution)
    including the onset pulse (key 'snr_prior'). Stores results in metadata of input stream traces.
    This SNR is a ratio of max envelopes.

    Some authors compute this prior SNR on signal after rotation but before deconvolution, however
    that doesn't make sense for LQT rotation where the optimal rotation will result in the least
    energy in the L component. For simplicity we compute it on Z-component only which is a reasonable
    estimate for teleseismic events.

    :param src_stream: Seismic traces before RF deconvolution of raw stream.
    :type src_stream: rf.RFStream or obspy.Stream
    """
    logger = logging.getLogger(__name__)

    if isinstance(src_stream, rf.RFStream):
        slice2 = lambda s, w: s.slice2(*w, reftime='onset')
    elif isinstance(src_stream, obspy.Stream):
        slice2 = lambda s, w: s.slice(w[0] if w[0] is None else s[0].stats.onset - w[0],
                                      w[1] if w[1] is None else s[0].stats.onset + w[1])
    else:
        assert False, "NYI"
    # end if

    def _set_nan_snr(stream):
        md_dict = {'snr_prior': np.nan}
        for tr in stream:
            tr.stats.update(md_dict)
        # end for
    # end func

    src_stream = src_stream.select(component='Z')

    # Compute max envelope amplitude from onset onwards relative to max envelope before onset.
    PRIOR_PICK_SIGNAL_WINDOW = (-5.0, 25.0)
    PRIOR_NOISE_SIGNAL_WINDOW = (None, -5.0)
    pick_signal = slice2(src_stream.copy(), PRIOR_PICK_SIGNAL_WINDOW)
    pick_signal = pick_signal.taper(0.5, max_length=0.5)
    pick_signal = np.array([tr.data for tr in pick_signal])
    if len(pick_signal.shape) == 1:
        pick_signal = pick_signal.reshape(1, -1)
    # Compute envelope of all traces
    if not np.any(pick_signal):
        _set_nan_snr(src_stream)
        return
    # end if
    pick_signal = np.absolute(signal.hilbert(pick_signal, axis=1))

    noise = slice2(src_stream.copy(), PRIOR_NOISE_SIGNAL_WINDOW)
    # Taper the slices so that the result is not overly affected by the phase of the signal at the ends.
    noise = noise.taper(0.5, max_length=0.5)
    noise = np.array([tr.data for tr in noise])
    if len(noise.shape) == 1:
        noise = noise.reshape(1, -1)
    if not np.any(noise):
        _set_nan_snr(src_stream)
        return
    # end if
    noise = np.absolute(signal.hilbert(noise, axis=1))

    if pick_signal.shape[0] != noise.shape[0]:
        logger.error("Shape inconsistency between noise and signal slices: {}[0] != {}[0]"
                     .format(pick_signal.shape, noise.shape))
        _set_nan_snr(src_stream)
    else:
        snr_prior = np.max(pick_signal, axis=1) / np.max(noise, axis=1)
        for i, tr in enumerate(src_stream):
            md_dict = {'snr_prior': snr_prior[i]}
            tr.stats.update(md_dict)
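
Stripped of the obspy plumbing, the measurement itself is just a ratio of envelope maxima. A toy, hedged version on a plain array, with a hypothetical onset index and windows mirroring the (-5, 25) s and (None, -5) s slices at 100 Hz:

import numpy as np
from scipy.signal import hilbert

fs = 100.0
rng = np.random.default_rng(0)
trace = 0.1 * rng.standard_normal(6000)                            # 60 s of noise
trace[3000:3500] += np.sin(2 * np.pi * 1.0 * np.arange(500) / fs)  # arrival at 30 s

onset = 3000
pick = np.abs(hilbert(trace[onset - 500:onset + 2500]))  # (-5 s, +25 s) window
noise = np.abs(hilbert(trace[:onset - 500]))             # everything before -5 s
snr_prior = pick.max() / noise.max()                     # ratio of max envelopes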
Example #37
 def get_envelope(self, y):
     analytic_signal = hilbert(y)
     amplitude_envelope = np.abs(analytic_signal)
     return amplitude_envelope
Example #38
def get_tfr(cfg, recursive=False, n_jobs=1):
    '''
    @params:
    tfr_type: 'multitaper' or 'morlet'
    recursive: if True, load raw files in sub-dirs recursively
    export_path: path to save plots
    n_jobs: number of cores to run in parallel
    '''

    cfg = check_config(cfg)
    tfr_type = cfg.TFR_TYPE
    export_path = cfg.EXPORT_PATH
    t_buffer = cfg.T_BUFFER
    if tfr_type == 'multitaper':
        tfr = mne.time_frequency.tfr_multitaper
    elif tfr_type == 'morlet':
        tfr = mne.time_frequency.tfr_morlet
    elif tfr_type == 'butter':
        butter_order = 4  # TODO: parameterize
        tfr = lfilter
    elif tfr_type == 'fir':
        raise NotImplementedError
    else:
        raise ValueError('Wrong TFR type %s' % tfr_type)
    n_jobs = cfg.N_JOBS
    if n_jobs is None:
        n_jobs = mp.cpu_count()

    if hasattr(cfg, 'DATA_PATHS'):
        if export_path is None:
            raise ValueError(
                'For multiple directories, cfg.EXPORT_PATH cannot be None')
        else:
            outpath = export_path
        # custom event file
        if hasattr(cfg, 'EVENT_FILE') and cfg.EVENT_FILE is not None:
            events = mne.read_events(cfg.EVENT_FILE)
        file_prefix = 'grandavg'

        # load and merge files from all directories
        flist = []
        for ddir in cfg.DATA_PATHS:
            ddir = ddir.replace('\\', '/')
            if ddir[-1] != '/': ddir += '/'
            for f in qc.get_file_list(ddir, fullpath=True,
                                      recursive=recursive):
                if qc.parse_path(f).ext in ['fif', 'bdf', 'gdf']:
                    flist.append(f)
        raw, events = pu.load_multi(flist)
    else:
        logger.info('Loading %s' % cfg.DATA_FILE)
        raw, events = pu.load_raw(cfg.DATA_FILE)

        # custom events
        if hasattr(cfg, 'EVENT_FILE') and cfg.EVENT_FILE is not None:
            events = mne.read_events(cfg.EVENT_FILE)

        if export_path is None:
            [outpath, file_prefix, _] = qc.parse_path_list(cfg.DATA_FILE)
        else:
            file_prefix = qc.parse_path(cfg.DATA_FILE).name
            outpath = export_path

    # re-referencing
    if cfg.REREFERENCE is not None:
        pu.rereference(raw, cfg.REREFERENCE[1], cfg.REREFERENCE[0])
        assert cfg.REREFERENCE[0] in raw.ch_names

    sfreq = raw.info['sfreq']

    # set channels of interest
    picks = pu.channel_names_to_index(raw, cfg.CHANNEL_PICKS)
    spchannels = pu.channel_names_to_index(raw, cfg.SP_CHANNELS)

    if max(picks) > len(raw.info['ch_names']):
        msg = 'ERROR: "picks" has a channel index %d while there are only %d channels.' %\
              (max(picks), len(raw.info['ch_names']))
        raise RuntimeError(msg)

    # Apply filters
    raw = pu.preprocess(raw,
                        spatial=cfg.SP_FILTER,
                        spatial_ch=spchannels,
                        spectral=cfg.TP_FILTER,
                        spectral_ch=picks,
                        notch=cfg.NOTCH_FILTER,
                        notch_ch=picks,
                        multiplier=cfg.MULTIPLIER,
                        n_jobs=n_jobs)

    # Read epochs
    classes = {}
    for t in cfg.TRIGGERS:
        if t in set(events[:, -1]):
            if hasattr(cfg, 'tdef'):
                classes[cfg.tdef.by_value[t]] = t
            else:
                classes[str(t)] = t
    if len(classes) == 0:
        raise ValueError('No desired event was found from the data.')

    try:
        tmin = cfg.EPOCH[0]
        tmin_buffer = tmin - t_buffer
        raw_tmax = raw._data.shape[1] / sfreq - 0.1
        if cfg.EPOCH[1] is None:
            if cfg.POWER_AVERAGED:
                raise ValueError(
                    'EPOCH value cannot have None for grand averaged TFR')
            else:
                if len(cfg.TRIGGERS) > 1:
                    raise ValueError(
                        'If the end time of EPOCH is None, only a single event can be defined.'
                    )
                t_ref = events[np.where(
                    events[:, 2] == list(cfg.TRIGGERS)[0])[0][0], 0] / sfreq
                tmax = raw_tmax - t_ref - t_buffer
        else:
            tmax = cfg.EPOCH[1]
        tmax_buffer = tmax + t_buffer
        if tmax_buffer > raw_tmax:
            raise ValueError(
                'Epoch length with buffer (%.3f) is larger than signal length (%.3f)'
                % (tmax_buffer, raw_tmax))
        epochs_all = mne.Epochs(raw,
                                events,
                                classes,
                                tmin=tmin_buffer,
                                tmax=tmax_buffer,
                                proj=False,
                                picks=picks,
                                baseline=None,
                                preload=True)
        if epochs_all.drop_log_stats() > 0:
            logger.error(
                '\n** Bad epochs found. Dropping into a Python shell.')
            logger.error(epochs_all.drop_log)
            logger.error('tmin = %.1f, tmax = %.1f, tmin_buffer = %.1f, tmax_buffer = %.1f, raw length = %.1f' % \
                (tmin, tmax, tmin_buffer, tmax_buffer, raw._data.shape[1] / sfreq))
            logger.error('\nType exit to continue.\n')
            pdb.set_trace()
    except:
        logger.critical(
            '\n*** (tfr_export) Unknown error occurred while epoching ***')
        logger.critical('tmin = %.1f, tmax = %.1f, tmin_buffer = %.1f, tmax_buffer = %.1f, raw length = %.1f' % \
            (tmin, tmax, tmin_buffer, tmax_buffer, raw._data.shape[1] / sfreq))
        pdb.set_trace()

    power = {}
    for evname in classes:
        export_dir = outpath
        qc.make_dirs(export_dir)
        logger.info('>> Processing %s' % evname)
        freqs = cfg.FREQ_RANGE  # define frequencies of interest
        n_cycles = freqs / 2.  # different number of cycle per frequency
        if cfg.POWER_AVERAGED:
            # grand-average TFR
            epochs = epochs_all[evname][:]
            if len(epochs) == 0:
                logger.warning('No %s epochs. Skipping.' % evname)
                continue

            if tfr_type == 'butter':
                b, a = butter_bandpass(cfg.FREQ_RANGE[0],
                                       cfg.FREQ_RANGE[-1],
                                       sfreq,
                                       order=butter_order)
                tfr_filtered = lfilter(b, a, epochs, axis=2)
                tfr_hilbert = hilbert(tfr_filtered)
                tfr_power = abs(tfr_hilbert)
                tfr_data = np.mean(tfr_power, axis=0)
            elif tfr_type == 'fir':
                raise NotImplementedError
            else:
                power[evname] = tfr(epochs,
                                    freqs=freqs,
                                    n_cycles=n_cycles,
                                    use_fft=False,
                                    return_itc=False,
                                    decim=1,
                                    n_jobs=n_jobs)
                power[evname] = power[evname].crop(tmin=tmin, tmax=tmax)
                tfr_data = power[evname].data

            if cfg.EXPORT_MATLAB is True:
                # export all channels to MATLAB
                mout = '%s/%s-%s-%s.mat' % (export_dir, file_prefix,
                                            cfg.SP_FILTER, evname)
                scipy.io.savemat(
                    mout, {
                        'tfr': tfr_data,
                        'chs': epochs.ch_names,
                        'events': events,
                        'sfreq': sfreq,
                        'epochs': cfg.EPOCH,
                        'freqs': cfg.FREQ_RANGE
                    })
                logger.info('Exported %s' % mout)
            if cfg.EXPORT_PNG is True:
                # Inspect power for each channel
                for ch in np.arange(len(picks)):
                    chname = raw.ch_names[picks[ch]]
                    title = 'Peri-event %s - Channel %s' % (evname, chname)

                    # mode= None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
                    fig = power[evname].plot([ch],
                                             baseline=cfg.BS_TIMES,
                                             mode=cfg.BS_MODE,
                                             show=False,
                                             colorbar=True,
                                             title=title,
                                             vmin=cfg.VMIN,
                                             vmax=cfg.VMAX,
                                             dB=False)
                    fout = '%s/%s-%s-%s-%s.png' % (
                        export_dir, file_prefix, cfg.SP_FILTER, evname, chname)
                    fig.savefig(fout)
                    plt.close()
                    logger.info('Exported to %s' % fout)
        else:
            # TFR per event
            for ep in range(len(epochs_all[evname])):
                epochs = epochs_all[evname][ep]
                if len(epochs) == 0:
                    logger.warning('No %s epochs. Skipping.' % evname)
                    continue
                power[evname] = tfr(epochs,
                                    freqs=freqs,
                                    n_cycles=n_cycles,
                                    use_fft=False,
                                    return_itc=False,
                                    decim=1,
                                    n_jobs=n_jobs)
                power[evname] = power[evname].crop(tmin=tmin, tmax=tmax)
                if cfg.EXPORT_MATLAB is True:
                    # export all channels to MATLAB
                    mout = '%s/%s-%s-%s-ep%02d.mat' % (
                        export_dir, file_prefix, cfg.SP_FILTER, evname, ep + 1)
                    scipy.io.savemat(
                        mout, {
                            'tfr': power[evname].data,
                            'chs': power[evname].ch_names,
                            'events': events,
                            'sfreq': sfreq,
                            'tmin': tmin,
                            'tmax': tmax,
                            'freqs': cfg.FREQ_RANGE
                        })
                    logger.info('Exported %s' % mout)
                if cfg.EXPORT_PNG is True:
                    # Inspect power for each channel
                    for ch in np.arange(len(picks)):
                        chname = raw.ch_names[picks[ch]]
                        title = 'Peri-event %s - Channel %s, Trial %d' % (
                            evname, chname, ep + 1)
                        # mode= None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
                        fig = power[evname].plot([ch],
                                                 baseline=cfg.BS_TIMES,
                                                 mode=cfg.BS_MODE,
                                                 show=False,
                                                 colorbar=True,
                                                 title=title,
                                                 vmin=cfg.VMIN,
                                                 vmax=cfg.VMAX,
                                                 dB=False)
                        fout = '%s/%s-%s-%s-%s-ep%02d.png' % (
                            export_dir, file_prefix, cfg.SP_FILTER, evname,
                            chname, ep + 1)
                        fig.savefig(fout)
                        plt.close()
                        logger.info('Exported %s' % fout)

    if hasattr(cfg, 'POWER_DIFF'):
        export_dir = '%s/diff' % outpath
        qc.make_dirs(export_dir)
        labels = list(classes.keys())
        df = power[labels[0]] - power[labels[1]]
        df.data = np.log(np.abs(df.data))
        # Inspect power diff for each channel
        for ch in np.arange(len(picks)):
            chname = raw.ch_names[picks[ch]]
            title = 'Peri-event %s-%s - Channel %s' % (labels[0], labels[1],
                                                       chname)

            # mode= None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            fig = df.plot([ch],
                          baseline=cfg.BS_TIMES,
                          mode=cfg.BS_MODE,
                          show=False,
                          colorbar=True,
                          title=title,
                          vmin=-3.0,
                          vmax=3.0,
                          dB=False)
            fout = '%s/%s-%s-diff-%s-%s-%s.jpg' % (export_dir, file_prefix,
                                                   cfg.SP_FILTER, labels[0],
                                                   labels[1], chname)
            logger.info('Exporting to %s' % fout)
            fig.savefig(fout)
            plt.close()
    logger.info('Finished!')
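
# The 'butter' TFR branch above relies on a butter_bandpass() helper that is
# not shown in this snippet. A minimal sketch of such a helper, assuming a
# standard scipy.signal Butterworth design (name and signature inferred, not
# taken from the original project):
from scipy.signal import butter

def butter_bandpass(lowcut, highcut, fs, order=4):
    """Design a Butterworth band-pass filter; returns (b, a) coefficients."""
    nyq = 0.5 * fs
    return butter(order, [lowcut / nyq, highcut / nyq], btype='band')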
예제 #39
0
        # load data
        dataStruct = sp.io.loadmat(filename)
        data = dataStruct['data']
        locs = dataStruct['locs']

        # how much data we want
        data = data[10000:100000]  # samples 10000-100000 (10 to 100 s at the assumed fs of 1000 Hz)

        # for every channel 
        for ch in range(len(locs)):


            # phase of the low-frequency (theta) band via band-pass + Hilbert
            phase_data = butter_bandpass_filter(data[:, ch], phase_providing_band[0], phase_providing_band[1], round(float(fs)))
            phase_data_hilbert = hilbert(phase_data)
            phase_data_angle = np.angle(phase_data_hilbert)

            # amplitude envelope of the high-frequency (high-gamma) band
            amp_data = butter_bandpass_filter(data[:, ch], amplitude_providing_band[0], amplitude_providing_band[1], round(float(fs)))
            amp_data_hilbert = hilbert(amp_data)
            amp_data_abs = abs(amp_data_hilbert)
           
            # get a random number to use as the start sample
            # (a random start between 20 and 30 s, at the assumed fs of 1000 Hz)
            rdm = (np.random.rand(1) * 10 + 20) * 1000
            rdm = round(rdm.item())

            # part of the data on which to calculate PAC: a window of
            # timewindows[tw] seconds starting at the random sample
            phase_data_angle = phase_data_angle[rdm:rdm + round(timewindows[tw] * fs)]
            amp_data_abs = amp_data_abs[rdm:rdm + round(timewindows[tw] * fs)]
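
            # With the phase and amplitude series above, PAC itself can be
            # estimated. A minimal sketch of the mean-vector-length
            # (Canolty-style) estimate, shown here for illustration only:
            pac_mvl = np.abs(np.mean(amp_data_abs * np.exp(1j * phase_data_angle)))
            print('channel %d MVL PAC estimate: %.4f' % (ch, pac_mvl))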
예제 #40
0
def strf(time,
         freq,
         sr,
         bins_per_octave,
         rate=1,
         scale=1,
         phi=0,
         theta=0,
         ndft=None):
    """Spectral-temporal response fields for both up and down direction.

    Implement the STRF described in Chi, Ru, and Shamma:
    Chi, T., Ru, P., & Shamma, S. A. (2005). Multiresolution spectrotemporal
    analysis of complex sounds. The Journal of the Acoustical Society of
    America, 118(2), 887–906. https://doi.org/10.1121/1.1945807.

    Parameters
    ----------
    time: int or float
        Time support in seconds. The returned STRF will cover the range
        [0, time).
    freq: int or float
        Frequency support in number of octaves. The returned STRF will
        cover the range [-freq, freq).
    sr: int
        Sampling rate in Hz.
    bins_per_octave: int
        Number of frequency bins per octave on the log-frequency scale.
    rate: int or float
        Stretch factor in time.
    scale: int or float
        Stretch factor in frequency.
    phi: float
        Orientation of spectral evolution in radians.
    theta: float
        Orientation of time evolution in radians.
    ndft: int or None
        FFT length used for the Hilbert transforms. When None, defaults to a
        power of two no smaller than 512 that covers both impulse responses.

    """
    def _hs(x, scale):
        """Construct a 1-D spectral impulse response with a 2-diff Gaussian.

        This is the prototype filter suggested by Chi et al.
        """
        sx = scale * x
        return scale * (1 -
                        (2 * np.pi * sx)**2) * np.exp(-(2 * np.pi * sx)**2 / 2)

    def _ht(t, rate):
        """Construct a 1-D temporal impulse response with a Gamma function.

        This is the prototype filter suggested by Chi et al.
        """
        rt = rate * t
        return rate * rt**2 * np.exp(-3.5 * rt) * np.sin(2 * np.pi * rt)

    hs = _hs(
        np.linspace(-freq,
                    freq,
                    endpoint=False,
                    num=int(2 * freq * bins_per_octave)), scale)
    ht = _ht(np.linspace(0, time, endpoint=False, num=int(sr * time)), rate)
    if ndft is None:
        ndft = max(512, nextpow2(max(len(hs), len(ht))))
    assert ndft >= max(len(ht), len(hs))
    hsa = signal.hilbert(hs, ndft)[:len(hs)]
    hta = signal.hilbert(ht, ndft)[:len(ht)]
    hirs = hs * np.cos(phi) + hsa.imag * np.sin(phi)
    hirt = ht * np.cos(theta) + hta.imag * np.sin(theta)
    hirs_ = signal.hilbert(hirs, ndft)[:len(hs)]
    hirt_ = signal.hilbert(hirt, ndft)[:len(ht)]
    return np.outer(hirt_, hirs_).real,\
        np.outer(np.conj(hirt_), hirs_).real
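
# A minimal usage sketch for strf(); the parameter values below are
# illustrative assumptions, not values from the original project.
up, down = strf(time=0.25, freq=2, sr=16000, bins_per_octave=24,
                rate=4, scale=2)
print(up.shape)  # (time samples, frequency bins) = (4000, 96)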
예제 #41
0
def create_features(seg_id, seg, X, st, end):
    """
    creates the primary statistical features from signal slices, for training slices and test signals
    heavily influenced by Lukayenko (2019), added frequency banding via digital filters, Fourier transform was
    switched to magnitude and phase based upon the EDA
    :param seg_id: segment id as a number
    :param seg: segment, as a DataFrame
    :param X: DataFrame that is the target into which features are created
    :param st: segment start id, for debug
    :param end: segment end id, for debug
    :return: X: DataFrame that is the target into which features are created
    """
    try:
        X.loc[seg_id, 'seg_id'] = np.int32(seg_id)
        X.loc[seg_id, 'seg_start'] = np.int32(st)
        X.loc[seg_id, 'seg_end'] = np.int32(end)
    except:
        pass

    xc = pd.Series(seg['acoustic_data'].values)
    xcdm = xc - np.mean(xc)

    b, a = des_bw_filter_lp(cutoff=18000)
    xcz = sg.lfilter(b, a, xcdm)

    zc = np.fft.fft(xcz)
    zc = zc[:MAX_FREQ_IDX]

    # FFT transform values
    realFFT = np.real(zc)
    imagFFT = np.imag(zc)

    freq_bands = [x for x in range(0, MAX_FREQ_IDX, FREQ_STEP)]
    magFFT = np.sqrt(realFFT**2 + imagFFT**2)
    phzFFT = np.arctan(imagFFT / realFFT)
    phzFFT[phzFFT == -np.inf] = -np.pi / 2.0
    phzFFT[phzFFT == np.inf] = np.pi / 2.0
    phzFFT = np.nan_to_num(phzFFT)

    for freq in freq_bands:
        X.loc[seg_id, 'FFT_Mag_01q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.01)
        X.loc[seg_id, 'FFT_Mag_10q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.1)
        X.loc[seg_id, 'FFT_Mag_90q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.9)
        X.loc[seg_id, 'FFT_Mag_99q%d' % freq] = np.quantile(
            magFFT[freq:freq + FREQ_STEP], 0.99)
        X.loc[seg_id,
              'FFT_Mag_mean%d' % freq] = np.mean(magFFT[freq:freq + FREQ_STEP])
        X.loc[seg_id,
              'FFT_Mag_std%d' % freq] = np.std(magFFT[freq:freq + FREQ_STEP])
        X.loc[seg_id,
              'FFT_Mag_max%d' % freq] = np.max(magFFT[freq:freq + FREQ_STEP])

        X.loc[seg_id,
              'FFT_Phz_mean%d' % freq] = np.mean(phzFFT[freq:freq + FREQ_STEP])
        X.loc[seg_id,
              'FFT_Phz_std%d' % freq] = np.std(phzFFT[freq:freq + FREQ_STEP])

    X.loc[seg_id, 'FFT_Rmean'] = realFFT.mean()
    X.loc[seg_id, 'FFT_Rstd'] = realFFT.std()
    X.loc[seg_id, 'FFT_Rmax'] = realFFT.max()
    X.loc[seg_id, 'FFT_Rmin'] = realFFT.min()
    X.loc[seg_id, 'FFT_Imean'] = imagFFT.mean()
    X.loc[seg_id, 'FFT_Istd'] = imagFFT.std()
    X.loc[seg_id, 'FFT_Imax'] = imagFFT.max()
    X.loc[seg_id, 'FFT_Imin'] = imagFFT.min()

    X.loc[seg_id, 'FFT_Rmean_first_6000'] = realFFT[:6000].mean()
    X.loc[seg_id, 'FFT_Rstd__first_6000'] = realFFT[:6000].std()
    X.loc[seg_id, 'FFT_Rmax_first_6000'] = realFFT[:6000].max()
    X.loc[seg_id, 'FFT_Rmin_first_6000'] = realFFT[:6000].min()
    X.loc[seg_id, 'FFT_Rmean_first_18000'] = realFFT[:18000].mean()
    X.loc[seg_id, 'FFT_Rstd_first_18000'] = realFFT[:18000].std()
    X.loc[seg_id, 'FFT_Rmax_first_18000'] = realFFT[:18000].max()
    X.loc[seg_id, 'FFT_Rmin_first_18000'] = realFFT[:18000].min()

    del xcz
    del zc

    b, a = des_bw_filter_lp(cutoff=2500)
    xc0 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_bp(low=2500, high=5000)
    xc1 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_bp(low=5000, high=7500)
    xc2 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_bp(low=7500, high=10000)
    xc3 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_bp(low=10000, high=12500)
    xc4 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_bp(low=12500, high=15000)
    xc5 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_bp(low=15000, high=17500)
    xc6 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_bp(low=17500, high=20000)
    xc7 = sg.lfilter(b, a, xcdm)

    b, a = des_bw_filter_hp(cutoff=20000)
    xc8 = sg.lfilter(b, a, xcdm)

    sigs = [
        xc,
        pd.Series(xc0),
        pd.Series(xc1),
        pd.Series(xc2),
        pd.Series(xc3),
        pd.Series(xc4),
        pd.Series(xc5),
        pd.Series(xc6),
        pd.Series(xc7),
        pd.Series(xc8)
    ]

    for i, sig in enumerate(sigs):
        X.loc[seg_id, 'mean_%d' % i] = sig.mean()
        X.loc[seg_id, 'std_%d' % i] = sig.std()
        X.loc[seg_id, 'max_%d' % i] = sig.max()
        X.loc[seg_id, 'min_%d' % i] = sig.min()

        X.loc[seg_id, 'mean_change_abs_%d' % i] = np.mean(np.diff(sig))
        X.loc[seg_id, 'mean_change_rate_%d' % i] = np.mean(
            np.nonzero((np.diff(sig) / sig[:-1]))[0])
        X.loc[seg_id, 'abs_max_%d' % i] = np.abs(sig).max()
        X.loc[seg_id, 'abs_min_%d' % i] = np.abs(sig).min()

        X.loc[seg_id, 'std_first_50000_%d' % i] = sig[:50000].std()
        X.loc[seg_id, 'std_last_50000_%d' % i] = sig[-50000:].std()
        X.loc[seg_id, 'std_first_10000_%d' % i] = sig[:10000].std()
        X.loc[seg_id, 'std_last_10000_%d' % i] = sig[-10000:].std()

        X.loc[seg_id, 'avg_first_50000_%d' % i] = sig[:50000].mean()
        X.loc[seg_id, 'avg_last_50000_%d' % i] = sig[-50000:].mean()
        X.loc[seg_id, 'avg_first_10000_%d' % i] = sig[:10000].mean()
        X.loc[seg_id, 'avg_last_10000_%d' % i] = sig[-10000:].mean()

        X.loc[seg_id, 'min_first_50000_%d' % i] = sig[:50000].min()
        X.loc[seg_id, 'min_last_50000_%d' % i] = sig[-50000:].min()
        X.loc[seg_id, 'min_first_10000_%d' % i] = sig[:10000].min()
        X.loc[seg_id, 'min_last_10000_%d' % i] = sig[-10000:].min()

        X.loc[seg_id, 'max_first_50000_%d' % i] = sig[:50000].max()
        X.loc[seg_id, 'max_last_50000_%d' % i] = sig[-50000:].max()
        X.loc[seg_id, 'max_first_10000_%d' % i] = sig[:10000].max()
        X.loc[seg_id, 'max_last_10000_%d' % i] = sig[-10000:].max()

        X.loc[seg_id, 'max_to_min_%d' % i] = sig.max() / np.abs(sig.min())
        X.loc[seg_id, 'max_to_min_diff_%d' % i] = sig.max() - np.abs(sig.min())
        X.loc[seg_id, 'count_big_%d' % i] = len(sig[np.abs(sig) > 500])
        X.loc[seg_id, 'sum_%d' % i] = sig.sum()

        X.loc[seg_id, 'mean_change_rate_first_50000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[:50000]) / sig[:50000][:-1]))[0])
        X.loc[seg_id, 'mean_change_rate_last_50000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[-50000:]) / sig[-50000:][:-1]))[0])
        X.loc[seg_id, 'mean_change_rate_first_10000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[:10000]) / sig[:10000][:-1]))[0])
        X.loc[seg_id, 'mean_change_rate_last_10000_%d' % i] = np.mean(
            np.nonzero((np.diff(sig[-10000:]) / sig[-10000:][:-1]))[0])

        X.loc[seg_id, 'q95_%d' % i] = np.quantile(sig, 0.95)
        X.loc[seg_id, 'q99_%d' % i] = np.quantile(sig, 0.99)
        X.loc[seg_id, 'q05_%d' % i] = np.quantile(sig, 0.05)
        X.loc[seg_id, 'q01_%d' % i] = np.quantile(sig, 0.01)

        X.loc[seg_id, 'abs_q95_%d' % i] = np.quantile(np.abs(sig), 0.95)
        X.loc[seg_id, 'abs_q99_%d' % i] = np.quantile(np.abs(sig), 0.99)
        X.loc[seg_id, 'abs_q05_%d' % i] = np.quantile(np.abs(sig), 0.05)
        X.loc[seg_id, 'abs_q01_%d' % i] = np.quantile(np.abs(sig), 0.01)

        X.loc[seg_id, 'trend_%d' % i] = add_trend_feature(sig)
        X.loc[seg_id, 'abs_trend_%d' % i] = add_trend_feature(sig,
                                                              abs_values=True)
        X.loc[seg_id, 'abs_mean_%d' % i] = np.abs(sig).mean()
        X.loc[seg_id, 'abs_std_%d' % i] = np.abs(sig).std()

        X.loc[seg_id, 'mad_%d' % i] = sig.mad()
        X.loc[seg_id, 'kurt_%d' % i] = sig.kurtosis()
        X.loc[seg_id, 'skew_%d' % i] = sig.skew()
        X.loc[seg_id, 'med_%d' % i] = sig.median()

        X.loc[seg_id, 'Hilbert_mean_%d' % i] = np.abs(hilbert(sig)).mean()
        # mean of the band signal smoothed with a 150-sample Hann window
        X.loc[seg_id, 'Hann_window_mean_%d' % i] = (
            convolve(sig, hann(150), mode='same') / sum(hann(150))).mean()

        X.loc[seg_id, 'classic_sta_lta1_mean_%d' % i] = classic_sta_lta(
            sig, 500, 10000).mean()
        X.loc[seg_id, 'classic_sta_lta2_mean_%d' % i] = classic_sta_lta(
            sig, 5000, 100000).mean()
        X.loc[seg_id, 'classic_sta_lta3_mean_%d' % i] = classic_sta_lta(
            sig, 3333, 6666).mean()
        X.loc[seg_id, 'classic_sta_lta4_mean_%d' % i] = classic_sta_lta(
            sig, 10000, 25000).mean()

        X.loc[seg_id, 'Moving_average_700_mean_%d' %
              i] = sig.rolling(window=700).mean().mean(skipna=True)
        X.loc[seg_id, 'Moving_average_1500_mean_%d' %
              i] = sig.rolling(window=1500).mean().mean(skipna=True)
        X.loc[seg_id, 'Moving_average_3000_mean_%d' %
              i] = sig.rolling(window=3000).mean().mean(skipna=True)
        X.loc[seg_id, 'Moving_average_6000_mean_%d' %
              i] = sig.rolling(window=6000).mean().mean(skipna=True)

        ewma = pd.Series.ewm
        X.loc[seg_id, 'exp_Moving_average_300_mean_%d' % i] = ewma(
            sig, span=300).mean().mean(skipna=True)
        X.loc[seg_id, 'exp_Moving_average_3000_mean_%d' % i] = ewma(
            sig, span=3000).mean().mean(skipna=True)
        X.loc[seg_id, 'exp_Moving_average_30000_mean_%d' % i] = ewma(
            sig, span=30000).mean().mean(skipna=True)

        no_of_std = 2
        X.loc[seg_id, 'MA_700MA_std_mean_%d' %
              i] = sig.rolling(window=700).std().mean()
        X.loc[seg_id, 'MA_700MA_BB_high_mean_%d' % i] = (
            X.loc[seg_id, 'Moving_average_700_mean_%d' % i] +
            no_of_std * X.loc[seg_id, 'MA_700MA_std_mean_%d' % i]).mean()
        X.loc[seg_id, 'MA_700MA_BB_low_mean_%d' % i] = (
            X.loc[seg_id, 'Moving_average_700_mean_%d' % i] -
            no_of_std * X.loc[seg_id, 'MA_700MA_std_mean_%d' % i]).mean()
        X.loc[seg_id, 'MA_400MA_std_mean_%d' %
              i] = sig.rolling(window=400).std().mean()
        X.loc[seg_id, 'MA_400MA_BB_high_mean_%d' % i] = (
            X.loc[seg_id, 'Moving_average_700_mean_%d' % i] +
            no_of_std * X.loc[seg_id, 'MA_400MA_std_mean_%d' % i]).mean()
        X.loc[seg_id, 'MA_400MA_BB_low_mean_%d' % i] = (
            X.loc[seg_id, 'Moving_average_700_mean_%d' % i] -
            no_of_std * X.loc[seg_id, 'MA_400MA_std_mean_%d' % i]).mean()
        X.loc[seg_id, 'MA_1000MA_std_mean_%d' %
              i] = sig.rolling(window=1000).std().mean()

        X.loc[seg_id,
              'iqr_%d' % i] = np.subtract(*np.percentile(sig, [75, 25]))
        X.loc[seg_id, 'q999_%d' % i] = np.quantile(sig, 0.999)
        X.loc[seg_id, 'q001_%d' % i] = np.quantile(sig, 0.001)
        X.loc[seg_id, 'ave10_%d' % i] = stats.trim_mean(sig, 0.1)

    for windows in [10, 100, 1000]:
        x_roll_std = xc.rolling(windows).std().dropna().values
        x_roll_mean = xc.rolling(windows).mean().dropna().values

        X.loc[seg_id, 'ave_roll_std_' + str(windows)] = x_roll_std.mean()
        X.loc[seg_id, 'std_roll_std_' + str(windows)] = x_roll_std.std()
        X.loc[seg_id, 'max_roll_std_' + str(windows)] = x_roll_std.max()
        X.loc[seg_id, 'min_roll_std_' + str(windows)] = x_roll_std.min()
        X.loc[seg_id,
              'q01_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.01)
        X.loc[seg_id,
              'q05_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.05)
        X.loc[seg_id,
              'q95_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.95)
        X.loc[seg_id,
              'q99_roll_std_' + str(windows)] = np.quantile(x_roll_std, 0.99)
        X.loc[seg_id, 'av_change_abs_roll_std_' + str(windows)] = np.mean(
            np.diff(x_roll_std))
        X.loc[seg_id, 'av_change_rate_roll_std_' + str(windows)] = np.mean(
            np.nonzero((np.diff(x_roll_std) / x_roll_std[:-1]))[0])
        X.loc[seg_id,
              'abs_max_roll_std_' + str(windows)] = np.abs(x_roll_std).max()

        X.loc[seg_id, 'ave_roll_mean_' + str(windows)] = x_roll_mean.mean()
        X.loc[seg_id, 'std_roll_mean_' + str(windows)] = x_roll_mean.std()
        X.loc[seg_id, 'max_roll_mean_' + str(windows)] = x_roll_mean.max()
        X.loc[seg_id, 'min_roll_mean_' + str(windows)] = x_roll_mean.min()
        X.loc[seg_id, 'q01_roll_mean_' + str(windows)] = np.quantile(
            x_roll_mean, 0.01)
        X.loc[seg_id, 'q05_roll_mean_' + str(windows)] = np.quantile(
            x_roll_mean, 0.05)
        X.loc[seg_id, 'q95_roll_mean_' + str(windows)] = np.quantile(
            x_roll_mean, 0.95)
        X.loc[seg_id, 'q99_roll_mean_' + str(windows)] = np.quantile(
            x_roll_mean, 0.99)
        X.loc[seg_id, 'av_change_abs_roll_mean_' + str(windows)] = np.mean(
            np.diff(x_roll_mean))
        X.loc[seg_id, 'av_change_rate_roll_mean_' + str(windows)] = np.mean(
            np.nonzero((np.diff(x_roll_mean) / x_roll_mean[:-1]))[0])
        X.loc[seg_id,
              'abs_max_roll_mean_' + str(windows)] = np.abs(x_roll_mean).max()

    return X
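
# create_features() assumes Butterworth design helpers (des_bw_filter_lp,
# des_bw_filter_bp, des_bw_filter_hp) plus the constants MAX_FREQ_IDX and
# FREQ_STEP defined elsewhere. A minimal sketch of the helpers, with an
# assumed sampling rate (the constant below is illustrative, not from the
# original project):
from scipy import signal as sg

FS = 4_000_000  # assumed sampling rate in Hz

def des_bw_filter_lp(cutoff):
    """4th-order Butterworth low-pass; returns (b, a)."""
    return sg.butter(4, cutoff / (0.5 * FS), btype='lowpass')

def des_bw_filter_bp(low, high):
    """4th-order Butterworth band-pass; returns (b, a)."""
    return sg.butter(4, [low / (0.5 * FS), high / (0.5 * FS)], btype='bandpass')

def des_bw_filter_hp(cutoff):
    """4th-order Butterworth high-pass; returns (b, a)."""
    return sg.butter(4, cutoff / (0.5 * FS), btype='highpass')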
예제 #42
0
def stack_all(st1, st2, pws=False):
    """
    Stacks all traces in two ``Stream`` objects.

    Args:
        st1 (obspy.stream): Stream 1
        st2 (obspy.stream): Stream 2
        pws (bool, optional): Enables Phase-Weighted Stacking

    Returns:
        (tuple): tuple containing:

            * stack1 (obspy.trace): Stacked trace for Stream 1
            * stack2 (obspy.trace): Stacked trace for Stream 2

    """

    print()
    print('Stacking ALL traces in streams')

    # Copy stats from stream
    str_stats = st1[0].stats

    # Initialize arrays
    tmp1 = np.zeros(len(st1[0].data))
    tmp2 = np.zeros(len(st2[0].data))
    weight1 = np.zeros(len(st1[0].data), dtype=complex)
    weight2 = np.zeros(len(st2[0].data), dtype=complex)

    # Stack all traces
    for tr in st1:
        tmp1 += tr.data
        hilb1 = hilbert(tr.data)
        phase1 = np.arctan2(hilb1.imag, hilb1.real)
        weight1 += np.exp(1j * phase1)

    for tr in st2:
        tmp2 += tr.data
        hilb2 = hilbert(tr.data)
        phase2 = np.arctan2(hilb2.imag, hilb2.real)
        weight2 += np.exp(1j * phase2)

    # Normalize
    tmp1 = tmp1 / float(len(st1))
    tmp2 = tmp2 / float(len(st2))

    # Phase-weighting
    if pws:
        weight1 = np.abs(weight1) / float(len(st1))
        weight2 = np.abs(weight2) / float(len(st2))
    else:
        weight1 = np.ones(len(st1[0].data))
        weight2 = np.ones(len(st2[0].data))

    # Put back into traces
    stack1 = Trace(data=weight1 * tmp1, header=str_stats)
    stack2 = Trace(data=weight2 * tmp2, header=str_stats)

    return stack1, stack2
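
# A minimal usage sketch (file patterns are hypothetical; assumes the two
# Streams hold equal-length, equally sampled traces):
from obspy import read

st1 = read('event1_*.mseed')
st2 = read('event2_*.mseed')
stack1, stack2 = stack_all(st1, st2, pws=True)  # phase-weighted stack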
예제 #43
0
def check_component(data, component, t_env_theory, coeff, X, Z, dx, dz):
    print("*** Checking " + component + " ***")
    field = data['boxlib', component].v.squeeze()
    env = abs(hilbert(field))

    env_theory = t_env_theory * np.abs(coeff)

    # Plot results
    fig = plt.figure(figsize=(12, 6))

    ax1 = fig.add_subplot(221, aspect='equal')
    ax1.set_title('PIC field')
    p1 = ax1.pcolormesh(X, Z, field)
    cax1 = make_axes_locatable(ax1).append_axes('right', size='5%', pad=0.05)
    fig.colorbar(p1, cax=cax1, orientation='vertical')

    ax2 = fig.add_subplot(222, aspect='equal')
    ax2.set_title('PIC envelope')
    p2 = ax2.pcolormesh(X, Z, env)
    cax2 = make_axes_locatable(ax2).append_axes('right', size='5%', pad=0.05)
    fig.colorbar(p2, cax=cax2, orientation='vertical')

    ax3 = fig.add_subplot(223, aspect='equal')
    ax3.set_title('Theory envelope')
    p3 = ax3.pcolormesh(X, Z, env_theory)
    cax3 = make_axes_locatable(ax3).append_axes('right', size='5%', pad=0.05)
    fig.colorbar(p3, cax=cax3, orientation='vertical')

    ax4 = fig.add_subplot(224, aspect='equal')
    ax4.set_title('Difference')
    p4 = ax4.pcolormesh(X, Z, env - env_theory)
    cax4 = make_axes_locatable(ax4).append_axes('right', size='5%', pad=0.05)
    fig.colorbar(p4, cax=cax4, orientation='vertical')

    plt.tight_layout()
    plt.savefig("plt_" + component + ".png", bbox_inches='tight')

    if (np.abs(coeff) < small_num):
        is_field_zero = np.sum(np.abs(env)) < small_num
        if is_field_zero:
            print("[OK] Field component expected to be 0 is ~ 0")
        else:
            print("[FAIL] Field component expected to be 0 is NOT ~ 0")
        assert (is_field_zero)
        print("******\n")
        return

    relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(
        np.abs(env_theory))
    is_env_ok = relative_error_env < relative_error_threshold
    if is_env_ok:
        print("[OK] Relative error envelope: {:6.3f} %".format(
            relative_error_env * 100))
    else:
        print("[FAIL] Relative error envelope: {:6.3f} %".format(
            relative_error_env * 100))
    assert (is_env_ok)

    fft_field = np.fft.fft2(field)

    freq_rows = np.fft.fftfreq(fft_field.shape[0], dx / c)
    freq_cols = np.fft.fftfreq(fft_field.shape[1], dz / c)

    pos_max = np.unravel_index(np.abs(fft_field).argmax(), fft_field.shape)

    freq = np.sqrt(freq_rows[pos_max[0]]**2 + freq_cols[pos_max[1]]**2)
    exp_freq = c / wavelength

    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
    is_freq_ok = relative_error_freq < relative_error_threshold
    if is_freq_ok:
        print("[OK] Relative error frequency: {:6.3f} %".format(
            relative_error_freq * 100))
    else:
        print("[FAIL] Relative error frequency: {:6.3f} %".format(
            relative_error_freq * 100))
    assert (is_freq_ok)

    print("******\n")
예제 #44
0
finder = np.where(np.isnan(alllines))
alllines[finder] = nanmean[finder[1]]
demean = alllines - np.mean(alllines, axis=0)

plt.style.use('default')
plt.figure()
for xx in range(len(alllines)):
    plt.plot(xinterp, alllines[xx, :], color=[0.7, 0.7, 0.7, 1], linewidth=0.5)
plt.plot(xinterp, np.nanmean(alllines, axis=0), 'k-')
plt.plot(xinterp,
         np.nanmean(alllines, axis=0) + np.nanstd(alllines, axis=0), 'k--')
plt.plot(xinterp,
         np.nanmean(alllines, axis=0) - np.nanstd(alllines, axis=0), 'k--')

from scipy.signal import hilbert
data = (hilbert(demean.T))

data = data.T
c = np.matmul(np.conj(data).T, data) / np.shape(data)[0]

import scipy.linalg as la
import numpy.linalg as npla

lamda, loadings = la.eigh(c)

lamda2, loadings2 = npla.eig(c)

ind = np.argsort(lamda)[::-1]  # indices of eigenvalues in descending order

lamda[::-1].sort()  # in-place sort of the reversed view leaves lamda descending
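
# With the eigenvalues now sorted in descending order, the variance
# explained by each complex EOF mode follows directly (a short sketch):
variance_explained = lamda / np.sum(lamda)
print('leading mode explains %.1f%% of the variance' % (100 * variance_explained[0]))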
예제 #45
0
    def _compute_features(self, arr, window=False):
        if window:
            result = np.zeros_like(self.result_template_window)
        else:
            result = np.zeros_like(self.result_template)
        i = 0
        if self.minimum:
            result[i] = np.min(arr)
            i += 1
        if self.maximum:
            result[i] = np.max(arr)
            i += 1
        if self.mean:
            result[i] = np.mean(arr)
            i += 1
        if self.median:
            result[i] = np.median(arr)
            i += 1
        if self.std:
            result[i] = np.std(arr)
            i += 1
        if self.abs_min:
            result[i] = np.min(np.abs(arr))
            i += 1
        if self.abs_max:
            result[i] = np.max(np.abs(arr))
            i += 1
        if self.abs_mean:
            result[i] = np.mean(np.abs(arr))
            i += 1
        if self.abs_median:
            result[i] = np.median(np.abs(arr))
            i += 1
        if self.abs_std:
            result[i] = np.std(np.abs(arr))
            i += 1
        if self.mean_abs_delta:
            result[i] = np.mean(np.diff(arr))
            i += 1
        if self.mean_rel_delta:
            result[i] = np.mean(np.nonzero((np.diff(arr) / arr[:-1]))[0])
            i += 1
        if self.max_to_min:
            result[i] = np.max(arr) / np.abs(np.min(arr))
            i += 1
        if self.abs_trend:
            idx = np.array(range(len(arr)))
            lr = LinearRegression()
            lr.fit(idx.reshape(-1, 1), np.abs(arr))
            result[i] = lr.coef_[0]
            i += 1
        if self.mad:  # mean absolute deviation
            result[i] = np.mean(np.abs(arr - np.mean(arr)))
            i += 1
        if self.skew:
            result[i] = stats.skew(arr)
            i += 1
        if self.abs_skew:
            result[i] = stats.skew(np.abs(arr))
            i += 1
        if self.kurtosis:  # measure of tailedness
            result[i] = stats.kurtosis(arr)
            i += 1
        if self.abs_kurtosis:  # measure of tailedness
            result[i] = stats.kurtosis(np.abs(arr))
            i += 1
        if self.hilbert:  # abs mean in hilbert tranformed space
            result[i] = np.mean(np.abs(signal.hilbert(arr)))
            i += 1
        if self.hann:  # mean in hann window
            result[i] = np.mean(
                signal.convolve(arr, signal.hann(150), mode='same') /
                np.sum(signal.hann(150)))
            i += 1
        if self.quantiles is not None:
            result[i:i + len(self.quantiles)] = np.quantile(arr,
                                                            q=self.quantiles)
            i += len(self.quantiles)
        if self.abs_quantiles is not None:
            result[i:i + len(self.abs_quantiles)] = np.quantile(
                np.abs(arr), q=self.abs_quantiles)
            i += len(self.abs_quantiles)
        if self.count_abs_big is not None:
            result[i:i + len(self.count_abs_big)] = np.array(
                [len(arr[np.abs(arr) > q]) for q in self.count_abs_big])
            i += len(self.count_abs_big)
        if self.stalta:
            if window:
                result[i:i + len(self.stalta_window)] = np.array([
                    np.mean(classic_sta_lta(arr, q[0], q[1]))
                    for q in self.stalta_window
                ])
                i += len(self.stalta_window)
            else:
                result[i:i + len(self.stalta)] = np.array([
                    np.mean(classic_sta_lta(arr, q[0], q[1]))
                    for q in self.stalta
                ])
                i += len(self.stalta)
        if self.exp_mov_ave:
            if window:
                result[i:i + len(self.exp_mov_ave_window)] = np.array([
                    np.mean(pd.Series.ewm(pd.Series(arr), span=q).mean())
                    for q in self.exp_mov_ave_window
                ])
                i += len(self.exp_mov_ave_window)
            else:
                result[i:i + len(self.exp_mov_ave)] = np.array([
                    np.mean(pd.Series.ewm(pd.Series(arr), span=q).mean())
                    for q in self.exp_mov_ave
                ])
                i += len(self.exp_mov_ave)

        return result
예제 #46
0
def extract_envelope(out_channel, multithreading=False):
    hilbert_channels = []
    for channel in out_channel:
        hilbert_channels.append(np.abs(scignal.hilbert(np.real(channel))))
    return np.array(hilbert_channels)
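
# A quick usage sketch (assumes scignal is an alias for scipy.signal, as in
# the function above):
import numpy as np

channels = np.random.randn(2, 1024)   # two synthetic channels
envelopes = extract_envelope(channels)
print(envelopes.shape)                # -> (2, 1024)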
예제 #47
0
def pro6stacked_seis(eq_file1, eq_file2, plot_scale_fac = 0.03, slow_delta = 0.0005,
			  slowR_lo = -0.1, slowR_hi = 0.1, slowT_lo = -0.1, slowT_hi = 0.1,
			  start_buff = -50, end_buff = 50, norm = 0, freq_corr = 1.0,
			  plot_dyn_range = 1000, fig_index = 401, get_stf = 0, ref_phase = 'blank',
			  ARRAY = 0, max_rat = 1.8, min_amp = 0.2, turn_off_black = 0,
			  R_slow_plot = 0, T_slow_plot = 0, tdiff_clip = 1, event_no = 0):

	import obspy
	import obspy.signal
	from obspy import UTCDateTime
	from obspy import Stream, Trace
	from obspy import read
	from obspy.geodetics import gps2dist_azimuth
	import numpy as np
	import os
	from obspy.taup import TauPyModel
	import obspy.signal as sign
	import matplotlib.pyplot as plt
	model = TauPyModel(model='iasp91')
	from scipy.signal import hilbert
	import math
	import time
	import statistics

#%% Get info
	#%% get locations
	print('Running pro6_plot_stacked_seis')
	start_time_wc = time.time()

	dphase = 'PKiKP'

	sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/events_good.txt'
	with open(sta_file, 'r') as file:
		lines = file.readlines()
	event_count = len(lines)

	print(str(event_count) + ' lines read from ' + sta_file)
	# Load station coords into arrays
	station_index = range(event_count)
	event_names        = []

	event_index = np.zeros(event_count)
	event_year  = np.zeros(event_count)
	event_mo    = np.zeros(event_count)
	event_day   = np.zeros(event_count)
	event_hr    = np.zeros(event_count)
	event_min   = np.zeros(event_count)
	event_sec   = np.zeros(event_count)
	event_lat   = np.zeros(event_count)
	event_lon   = np.zeros(event_count)
	event_dep   = np.zeros(event_count)
	event_mb    = np.zeros(event_count)
	event_ms    = np.zeros(event_count)
	event_tstart       = np.zeros(event_count)
	event_tend         = np.zeros(event_count)
	event_gcdist       = np.zeros(event_count)
	event_dist         = np.zeros(event_count)
	event_baz          = np.zeros(event_count)
	event_SNR          = np.zeros(event_count)
	event_Sflag        = np.zeros(event_count)
	event_PKiKPflag    = np.zeros(event_count)
	event_ICSflag      = np.zeros(event_count)
	event_PKiKP_radslo = np.zeros(event_count)
	event_PKiKP_traslo = np.zeros(event_count)
	event_PKiKP_qual   = np.zeros(event_count)
	event_ICS_qual     = np.zeros(event_count)

	iii = 0
	for ii in station_index:   # read file
		line = lines[ii]
		split_line = line.split()

		event_index[ii]  = float(split_line[0])
		event_names.append(split_line[1])
		event_year[ii]   = float(split_line[2])
		event_mo[ii]     = float(split_line[3])
		event_day[ii]    = float(split_line[4])
		event_hr[ii]     = float(split_line[5])
		event_min[ii]    = float(split_line[6])
		event_sec[ii]    = float(split_line[7])
		event_lat[ii]    = float(split_line[8])
		event_lon[ii]    = float(split_line[9])
		event_dep[ii]    = float(split_line[10])
		event_mb[ii]     = float(split_line[11])
		event_ms[ii]     = float(split_line[12])
		event_tstart[ii] = float(split_line[13])
		event_tend[ii]   = float(split_line[14])
		event_gcdist[ii] = float(split_line[15])
		event_dist[ii]   = float(split_line[16])
		event_baz[ii]    = float(split_line[17])
		event_SNR[ii]    = float(split_line[18])
		event_Sflag[ii]  = float(split_line[19])
		event_PKiKPflag[ii]     = float(split_line[20])
		event_ICSflag[ii]       = float(split_line[21])
		event_PKiKP_radslo[ii]  = float(split_line[22])
		event_PKiKP_traslo[ii]  = float(split_line[23])
		event_PKiKP_qual[ii]    = float(split_line[24])
		event_ICS_qual[ii]      = float(split_line[25])
#		print('Event ' + str(ii) + ' is ' + str(event_index[ii]))
		if event_index[ii] == event_no:
			iii = ii

	if iii == 0:
		print('Event ' + str(event_no) + ' not found')
	else:
		print('Event ' + str(event_no) + ' is ' + str(iii))
	#  find predicted slowness
	arrivals1 = model.get_travel_times(source_depth_in_km=event_dep[iii],distance_in_degree=event_gcdist[iii]-0.5,phase_list=[dphase])
	arrivals2 = model.get_travel_times(source_depth_in_km=event_dep[iii],distance_in_degree=event_gcdist[iii]+0.5,phase_list=[dphase])
	dtime = arrivals2[0].time - arrivals1[0].time
	event_pred_slo  = dtime/111.  # s/km

	# convert to pred rslo and tslo
	sin_baz = np.sin(event_baz[iii] * np.pi /180)
	cos_baz = np.cos(event_baz[iii] * np.pi /180)
	pred_Nslo = event_pred_slo * cos_baz
	pred_Eslo = event_pred_slo * sin_baz

	#  rotate observed slowness to N and E
	obs_Nslo = (event_PKiKP_radslo[iii] * cos_baz) - (event_PKiKP_traslo[iii] * sin_baz)
	obs_Eslo = (event_PKiKP_radslo[iii] * sin_baz) + (event_PKiKP_traslo[iii] * cos_baz)

	print('PR '+ str(pred_Nslo) + ' PT ' + str(pred_Eslo) + ' OR ' + str(obs_Nslo) + ' OT ' + str(obs_Eslo))
	#  find observed back-azimuth
#	bazi_rad = np.arctan(event_PKiKP_traslo[ii]/event_PKiKP_radslo[ii])
#	event_obs_bazi  = event_baz[ii] + (bazi_rad * 180 / np.pi)

	if ARRAY == 1:
		goto = '/Users/vidale/Documents/PyCode/LASA/EvLocs'
		os.chdir(goto)

	file = open(eq_file1, 'r')
	lines=file.readlines()
	split_line = lines[0].split()
	t1           = UTCDateTime(split_line[1])
	date_label1  = split_line[1][0:10]

	file = open(eq_file2, 'r')
	lines=file.readlines()
	split_line = lines[0].split()
	t2           = UTCDateTime(split_line[1])
	date_label2  = split_line[1][0:10]

	#%% read files
	# #%% Get saved event info, also used to name files
	# date_label = '2018-04-02' # date for filename
	if ARRAY == 1:
		goto = '/Users/vidale/Documents/PyCode/LASA/Pro_files'
		os.chdir(goto)
	fname1 = 'HD' + date_label1 + '_2dstack.mseed'
	fname2 = 'HD' + date_label2 + '_2dstack.mseed'
	st1 = Stream()
	st2 = Stream()
	st1 = read(fname1)
	st2 = read(fname2)

	tshift    = st1.copy()  # make array for time shift
	amp_ratio = st1.copy()  # make array for relative amplitude
	amp_ave   = st1.copy()  # make array for relative amplitude

	print('Read in: event 1 ' + str(len(st1)) + ' event 2 ' + str(len(st2)) + ' traces')
	nt1 = len(st1[0].data)
	nt2 = len(st2[0].data)
	dt1 = st1[0].stats.delta
	dt2 = st2[0].stats.delta
	print('Event 1 - First trace has ' + str(nt1) + ' time pts, time sampling of '
		  + str(dt1) + ' and thus duration of ' + str((nt1-1)*dt1))
	print('Event 2 - First trace has ' + str(nt2) + ' time pts, time sampling of '
		  + str(dt2) + ' and thus duration of ' + str((nt2-1)*dt2))
	if nt1 != nt2 or dt1 != dt2:
		print('nt or dt does not match')
		exit(-1)

	#%% Make grid of slownesses
	slowR_n = int(1 + (slowR_hi - slowR_lo)/slow_delta)  # number of slownesses
	slowT_n = int(1 + (slowT_hi - slowT_lo)/slow_delta)  # number of slownesses
	print(str(slowT_n) + ' trans slownesses, hi and lo are ' + str(slowT_hi) + '  ' + str(slowT_lo))
	# In English, stack_slows = range(slow_n) * slow_delta - slow_lo
	a1R = range(slowR_n)
	a1T = range(slowT_n)
	stack_Rslows = [(x * slow_delta + slowR_lo) for x in a1R]
	stack_Tslows = [(x * slow_delta + slowT_lo) for x in a1T]
	print(str(slowR_n) + ' radial slownesses, ' + str(slowT_n) + ' trans slownesses, ')

#%%  Loop over slowness
	total_slows = slowR_n * slowT_n
	global_max = 0
	for slow_i in range(total_slows): # find envelope, phase, tshift, and global max
		if slow_i % 200 == 0:
			print('At line 101, ' +str(slow_i) + ' slowness out of ' + str(total_slows))
		if len(st1[slow_i].data) == 0: # test for zero-length traces
				print('%d data has zero length ' % (slow_i))

		seismogram1 = hilbert(st1[slow_i].data)  # make analytic seismograms
		seismogram2 = hilbert(st2[slow_i].data)

		env1 = np.abs(seismogram1) # amplitude
		env2 = np.abs(seismogram2)
		amp_ave[slow_i].data    = 0.5 * (env1 + env2)
		amp_ratio[slow_i].data  = env1/env2

		angle1 = np.angle(seismogram1) # time shift
		angle2 = np.angle(seismogram2)
		phase1 = np.unwrap(angle1)
		phase2 = np.unwrap(angle2)
		dphase = (angle1 - angle2)
#		dphase = phase1 - phase2
		for it in range(nt1):
			if dphase[it] > math.pi:
				dphase[it] -= 2 * math.pi
			elif dphase[it] < -1 * math.pi:
				dphase[it] += 2 * math.pi
			if dphase[it] > math.pi or dphase[it] < -math.pi:
				print(f'Bad dphase value {dphase[it]:.2f}  {it:4d}')
		freq1 = np.diff(phase1) #freq in radians/sec
		freq2 = np.diff(phase2)
		ave_freq = 0.5*(freq1 + freq2)
		ave_freq_plus = np.append(ave_freq,[1]) # ave_freq one element too short
#		tshift[slow_i].data     = dphase / ave_freq_plus # 2*pi top and bottom cancels
		tshift[slow_i].data     = dphase/(2*math.pi*freq_corr)

		local_max = max(abs(amp_ave[slow_i].data))
		if local_max > global_max:
			global_max = local_max
#%% Extract slices
	tshift_full = tshift.copy()  # make array for time shift
	for slow_i in range(total_slows): # ignore less robust points
		if slow_i % 200 == 0:
			print('At line 140, ' +str(slow_i) + ' slowness out of ' + str(total_slows))
		for it in range(nt1):
			if ((amp_ratio[slow_i].data[it] < (1/max_rat)) or (amp_ratio[slow_i].data[it] > max_rat) or (amp_ave[slow_i].data[it] < (min_amp * global_max))):
				tshift[slow_i].data[it] = np.nan
	#%% If desired, find transverse slowness nearest T_slow_plot
	lowest_Tslow = 1000000
	for slow_i in range(slowT_n):
		if abs(stack_Tslows[slow_i] - T_slow_plot) < lowest_Tslow:
			lowest_Tindex = slow_i
			lowest_Tslow = abs(stack_Tslows[slow_i] - T_slow_plot)

	print(str(slowT_n) + ' T slownesses, index ' + str(lowest_Tindex) + ' is closest to input parameter ' + str(T_slow_plot) + ', slowness diff there is ' + str(lowest_Tslow) + ' and slowness is ' + str(stack_Tslows[lowest_Tindex]))
	# Select only stacks with that slowness for radial plot
	centralR_st1 = Stream()
	centralR_st2 = Stream()
	centralR_amp   = Stream()
	centralR_ampr  = Stream()
	centralR_tdiff = Stream()
	for slowR_i in range(slowR_n):
		ii = slowR_i*slowT_n + lowest_Tindex
		centralR_st1 += st1[ii]
		centralR_st2 += st2[ii]
		centralR_amp   += amp_ave[ii]
		centralR_ampr  += amp_ratio[ii]
		centralR_tdiff += tshift[ii]

	#%% If desired, find radial slowness nearest R_slow_plot
	lowest_Rslow = 1000000
	for slow_i in range(slowR_n):
		if abs(stack_Rslows[slow_i] - R_slow_plot) < lowest_Rslow:
			lowest_Rindex = slow_i
			lowest_Rslow = abs(stack_Rslows[slow_i] - R_slow_plot)

	print(str(slowR_n) + ' R slownesses, index ' + str(lowest_Rindex) + ' is closest to input parameter ' + str(R_slow_plot) + ', slowness diff there is ' + str(lowest_Rslow) + ' and slowness is ' + str(stack_Rslows[lowest_Rindex]))

	# Select only stacks with that slowness for transverse plot
	centralT_st1 = Stream()
	centralT_st2 = Stream()
	centralT_amp   = Stream()
	centralT_ampr  = Stream()
	centralT_tdiff = Stream()

	#%% to extract stacked time functions
	event1_sample = Stream()
	event2_sample = Stream()

	for slowT_i in range(slowT_n):
		ii = lowest_Rindex*slowT_n + slowT_i
		centralT_st1 += st1[ii]
		centralT_st2 += st2[ii]
		centralT_amp   += amp_ave[ii]
		centralT_ampr  += amp_ratio[ii]
		centralT_tdiff += tshift[ii]

	#%% compute timing time series
	ttt = (np.arange(len(st1[0].data)) * st1[0].stats.delta + start_buff) # in units of seconds

#%% Plot radial amp and tdiff vs time plots
	fig_index = 6
#	plt.close(fig_index)
	plt.figure(fig_index,figsize=(30,10))
	plt.xlim(start_buff,end_buff)
	plt.ylim(stack_Rslows[0], stack_Rslows[-1])
	for slowR_i in range(slowR_n):  # loop over radial slownesses
		dist_offset = stack_Rslows[slowR_i] # trying for approx degrees
		ttt = (np.arange(len(centralR_st1[slowR_i].data)) * centralR_st1[slowR_i].stats.delta
		 + (centralR_st1[slowR_i].stats.starttime - t1))
		plt.plot(ttt, (centralR_st1[slowR_i].data - np.median(centralR_st1[slowR_i].data))*plot_scale_fac /global_max + dist_offset, color = 'green')
		plt.plot(ttt, (centralR_st2[slowR_i].data - np.median(centralR_st2[slowR_i].data))*plot_scale_fac /global_max + dist_offset, color = 'red')
		# extract stacked time functions
		if get_stf != 0:
			if np.abs(stack_Rslows[slowR_i]- 0.005) < 0.000001: # kludge, not exactly zero when desired
				event1_sample = centralR_st1[slowR_i].copy()
				event2_sample = centralR_st2[slowR_i].copy()
#		plt.plot(ttt, (centralR_amp[slowR_i].data)  *plot_scale_fac/global_max + dist_offset, color = 'purple')
		if turn_off_black == 0:
			plt.plot(ttt, (centralR_tdiff[slowR_i].data)*plot_scale_fac/1 + dist_offset, color = 'black')
			plt.plot(ttt, (centralR_amp[slowR_i].data)*0.0 + dist_offset, color = 'lightgray') # reference lines
	plt.xlabel('Time (s)')
	plt.ylabel('R Slowness (s/km)')
	plt.title(ref_phase + ' seismograms and tdiff at ' + str(T_slow_plot) + ' T slowness, green is event1, red is event2')
	# Plot transverse amp and tdiff vs time plots
	fig_index = 7
#	plt.close(fig_index)
	plt.figure(fig_index,figsize=(30,10))
	plt.xlim(start_buff,end_buff)
	plt.ylim(stack_Tslows[0], stack_Tslows[-1])

	for slowT_i in range(slowT_n):  # loop over transverse slownesses
		dist_offset = stack_Tslows[slowT_i] # trying for approx degrees
		ttt = (np.arange(len(centralT_st1[slowT_i].data)) * centralT_st1[slowT_i].stats.delta
		 + (centralT_st1[slowT_i].stats.starttime - t1))
		plt.plot(ttt, (centralT_st1[slowT_i].data - np.median(centralT_st1[slowT_i].data))*plot_scale_fac /global_max + dist_offset, color = 'green')
		plt.plot(ttt, (centralT_st2[slowT_i].data - np.median(centralT_st2[slowT_i].data))*plot_scale_fac /global_max + dist_offset, color = 'red')
#		plt.plot(ttt, (centralT_amp[slowT_i].data)  *plot_scale_fac/global_max + dist_offset, color = 'purple')
		if turn_off_black == 0:
			plt.plot(ttt, (centralT_tdiff[slowT_i].data)*plot_scale_fac/1 + dist_offset, color = 'black')
			plt.plot(ttt, (centralT_amp[slowT_i].data)*0.0 + dist_offset, color = 'lightgray') # reference lines
	plt.xlabel('Time (s)')
	plt.ylabel('T Slowness (s/km)')
	plt.title(str(event_no) + '  ' + date_label1 + '  ' +ref_phase + ' seismograms and tdiff ' + str(R_slow_plot) + ' R slowness, green is event1, red is event2')
	os.chdir('/Users/vidale/Documents/PyCode/LASA/Quake_results/Plots')
#	plt.savefig(date_label1 + '_' + str(start_buff) + '_' + str(end_buff) + '_stack.png')

#%% R-T tshift averaged over time window
	fig_index = 8
	stack_slice = np.zeros((slowR_n,slowT_n))
	for slowR_i in range(slowR_n):  # loop over radial slownesses
		for slowT_i in range(slowT_n):  # loop over transverse slownesses
			index = slowR_i*slowT_n + slowT_i
			num_val = np.nanmedian(tshift[index].data)
#			num_val = statistics.median(tshift_full[index].data)
			stack_slice[slowR_i, slowT_i] = num_val # adjust for dominant frequency of 1.2 Hz, not 1 Hz
#	stack_slice[0,0] = -0.25
#	stack_slice[0,1] =  0.25
#	tdiff_clip = 0.4/1.2
	tdiff_clip_max =  tdiff_clip  # DO NOT LEAVE COMMENTED OUT!!
	tdiff_clip_min = -tdiff_clip

	y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
				 slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

	fig, ax = plt.subplots(1, figsize=(7,6))
#		fig, ax = plt.subplots(1, figsize=(9,2))
#		fig.subplots_adjust(bottom=0.3)
#	c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.bwr,      vmin = tdiff_clip_min, vmax = tdiff_clip_max)
	c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.coolwarm, vmin = tdiff_clip_min, vmax = tdiff_clip_max)
	ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
	circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
	ax.add_artist(circle1)
	circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
	ax.add_artist(circle2)  #outer core limit
	fig.colorbar(c, ax=ax)
	plt.ylabel('R Slowness (s/km)')
	plt.title(ref_phase + ' time shift')
#	plt.title('T-R average time shift ' + date_label1 + ' ' + date_label2)
	plt.show()

#%% R-T amplitude averaged over time window
	fig_index = 9
	stack_slice = np.zeros((slowR_n,slowT_n))
	smax = 0
	for slowR_i in range(slowR_n):  # loop over radial slownesses
		for slowT_i in range(slowT_n):  # loop over transverse slownesses
			index = slowR_i*slowT_n + slowT_i
			num_val = np.nanmedian(amp_ave[index].data)
			stack_slice[slowR_i, slowT_i] = num_val
			if num_val > smax:
				smax = num_val
#	stack_slice[0,0] = 0

	y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
				 slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

#	fig, ax = plt.subplots(1)
	fig, ax = plt.subplots(1, figsize=(7,6))
#	c = ax.pcolormesh(x1, y1, stack_slice/smax, cmap=plt.cm.gist_yarg, vmin = 0.5)
	c = ax.pcolormesh(x1, y1, stack_slice/smax, cmap=plt.cm.gist_rainbow_r, vmin = 0)
#	c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.gist_rainbow_r, vmin = 0)
	ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
	circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
	ax.add_artist(circle1)  #inner core limit
	circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
	ax.add_artist(circle2)  #outer core limit

	c = ax.scatter(pred_Eslo, pred_Nslo, color='blue', s=100, alpha=0.75)
	c = ax.scatter(obs_Eslo, obs_Nslo, color='black', s=100, alpha=0.75)

	fig.colorbar(c, ax=ax)
	plt.xlabel('Transverse Slowness (s/km)')
	plt.ylabel('Radial Slowness (s/km)')
	plt.title(str(event_no) + '  ' + date_label1 + '  ' + ref_phase + ' beam amplitude')
#	plt.title('Beam amplitude ' + date_label1 + ' ' + date_label2)
	os.chdir('/Users/vidale/Documents/PyCode/LASA/Quake_results/Plots')
	plt.savefig(date_label1 + '_' + str(start_buff) + '_' + str(end_buff) + '_beam.png')
	plt.show()

#%%  Save processed files
	if ARRAY == 0:
		goto = '/Users/vidale/Documents/PyCode/Hinet'
	if ARRAY == 1:
		goto = '/Users/vidale/Documents/PyCode/LASA/Pro_Files'
	os.chdir(goto)

	fname = 'HD' + date_label1 + '_' + date_label2 + '_tshift.mseed'
	tshift_full.write(fname,format = 'MSEED')

	fname = 'HD' + date_label1 + '_' + date_label2 + '_amp_ave.mseed'
	amp_ave.write(fname,format = 'MSEED')

	fname = 'HD' + date_label1 + '_' + date_label2 + '_amp_ratio.mseed'
	amp_ratio.write(fname,format = 'MSEED')

#%% Option to write out stf
	if get_stf != 0:
		event1_sample.taper(0.1)
		event2_sample.taper(0.1)
		fname = 'HD' + date_label1 + '_stf.mseed'
		event1_sample.write(fname,format = 'MSEED')
		fname = 'HD' + date_label2 + '_stf.mseed'
		event2_sample.write(fname,format = 'MSEED')

	elapsed_time_wc = time.time() - start_time_wc
	print('This job took ' + str(elapsed_time_wc) + ' seconds')
	os.system('say "Done"')
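
# The tdiff traces above convert a phase difference between two analytic
# signals into a time shift via dt = dphi / (2*pi*f). A standalone sketch of
# that identity with illustrative numbers:
import numpy as np

f = 1.0                      # dominant frequency in Hz (assumed)
dphi = 0.5                   # measured phase difference in radians
dt = dphi / (2 * np.pi * f)  # implied time shift in seconds, ~0.0796
print('time shift: %.4f s' % dt)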
예제 #48
0
        # plt.text(seis[0].stats.traveltimes[k]-align_time, np.round(seis[0].stats['az']),k, fontsize = 8)
        # plt.text(seis[0].stats.traveltimes[k]-seis[0].stats.traveltimes[phase],np.round(seis[0].stats['az'])-0.5, k)
    timewindow = 3 * (1 / fmin)
    print('NORM VALUE: ' + str(norm))
    w0 = np.argmin(
        np.abs(
            seistoplot.times(reftime=seistoplot.stats['eventtime']) -
            phase_time + timewindow / 3))
    w1 = np.argmin(
        np.abs(
            seistoplot.times(reftime=seistoplot.stats['eventtime']) -
            phase_time - timewindow * 2 / 3))
    window_wid = seistoplot.times(
        reftime=seistoplot.stats['eventtime'])[w1] - seistoplot.times(
            reftime=seistoplot.stats['eventtime'])[w0]
    window_hei = np.max(np.abs(hilbert(seistoplot.data[w0:w1]))) / norm
    # plot the picked window
    # gca().add_patch(patches.Rectangle((seistoplot.times(reftime=seistoplot.stats['eventtime'])[w0]-phase_time,np.round(seis[0].stats['az'])-window_hei),window_wid,2*window_hei,alpha = 0.2, color = 'red'))  # width height
    # A0_phase = np.abs(hilbert(seistoplot.data[w0:w1])).max()/norm

    # w0_noise1 = np.argmin(np.abs(seistoplot.times(reftime=seistoplot.stats['eventtime'])-noise_time+timewindow/3))        # arg of ref, adapative time window
    # w1_noise1 = np.argmin(np.abs(seistoplot.times(reftime=seistoplot.stats['eventtime'])-noise_time-timewindow*2/3))
    # A0_noise1 = np.abs(hilbert(seistoplot.data[w0_noise1:w1_noise1])).max()/norm
    # #gca().add_patch(patches.Rectangle((seistoplot.timesarray[w0_ref]-phase_time,np.round(seis[0].stats['az'])-A0),seistoplot.timesarray[w1_ref]-seistoplot.timesarray[w0_ref],2*A0,alpha = 0.4, color = 'red'))
    # w0_noise2 = np.argmin(np.abs(seistoplot.times(reftime=seistoplot.stats['eventtime'])-noise_time+timewindow/3-100))        # arg of ref, adapative time window
    # w1_noise2 = np.argmin(np.abs(seistoplot.times(reftime=seistoplot.stats['eventtime'])-noise_time-timewindow*2/3-100))
    # A0_noise2 = np.abs(hilbert(seistoplot.data[w0_noise2:w1_noise2])).max()/norm

    # w0_noise3 = np.argmin(np.abs(seistoplot.times(reftime=seistoplot.stats['eventtime'])-noise_time+timewindow/3-200))        # arg of ref, adapative time window
    # w1_noise3 = np.argmin(np.abs(seistoplot.times(reftime=seistoplot.stats['eventtime'])-noise_time-timewindow*2/3-200))
    # A0_noise3 = np.abs(hilbert(seistoplot.data[w0_noise3:w1_noise3])).max()/norm
예제 #49
0
file = file.set_duration(duration)
file = file.resize((320, 240))
file_audio = file.audio

ten_ms_avg = lambda i, f: np.sqrt(
    (f.subclip(i, i + 0.01).to_soundarray()**2).mean())
sig = [ten_ms_avg(i, file_audio) for i in np.arange(0.0, duration // 1, 0.01)]

sig_len = len(sig)

freq = sig_len / int(file.duration) + 0.0
t = np.arange(0, sig_len, 1) / freq

# print("phasr")

hil_sig = signal.hilbert(sig)

sig_env = np.abs(hil_sig)

avg = np.mean(sig_env)
# print(avg)
norm_env = [i / avg for i in sig_env]

b, a = signal.butter(3, 0.05)
smooth_env = signal.filtfilt(b, a, norm_env)

# plt.plot(t, smooth_env)
# plt.show()

#will find better ways to get this; although it works well
high = 1.8
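
# One way to use the hand-tuned threshold: mark where the smoothed envelope
# is loud (a sketch; 'high' is the empirical value chosen above):
loud = smooth_env > high  # boolean mask of loud samples
loud_times = t[loud]      # corresponding times in seconds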
예제 #50
0
파일: processing.py 프로젝트: ETDelaney/GI
def processing_correlation(ccf, f, verbose=0):
    """
	ccf = processing_correlation(ccf,f,verbose=0)

	Perform correlation function processing in the time and frequency domains.

	INPUT:
	------
	ccf:			Frequency-domain correlation function.
	f:				Frequency axis.
	verbose:		Give screen output when 1.

	OUTPUT:
	-------
	ccf:			Processed frequency-domain correlation function.
	
	Last updated: 8 February 2016.
	"""

    #==============================================================================
    #- Initialisation.
    #==============================================================================

    #- Start time.
    t1 = time.time()

    #- Input parameters.
    p = parameters.Parameters()

    #- Time and frequency axes.
    n = np.shape(ccf)[0]
    df = f[1]
    t = np.arange(-0.5 * n * p.dt, 0.5 * n * p.dt, p.dt)

    #- Initialise processed recordings.
    cct = np.zeros([n, p.Nwindows], dtype=float)

    #==============================================================================
    #- Time-domain processing.
    #==============================================================================

    #- Compute time-domain correlation. -------------------------------------------

    if p.process_causal_acausal_average == 1 or p.process_correlation_normalisation == 1 or p.process_phase_weighted_stack == 1:

        cct = np.real(np.fft.ifft(ccf, axis=0))

    #- Average causal and acausal branches. ---------------------------------------

    if p.process_causal_acausal_average == 1:

        if verbose == 1: print('average causal and acausal branch')

        cct[:, :] = 0.5 * (cct[:, :] + cct[::-1, :])

    #- Time-domain normalisation. -------------------------------------------------

    if p.process_correlation_normalisation == 1:

        if verbose == 1: print('normalise correlations')

        for k in range(p.Nwindows):
            cct[:, k] = cct[:, k] / np.max(np.abs(cct[:, k]))

    #- Phase-weighted stack. ------------------------------------------------------

    if p.process_phase_weighted_stack == 1:

        if verbose == 1: print('phase-weighted stack')

        #- Compute phase stack.
        s = signal.hilbert(cct, axis=0)
        phi = np.angle(s)
        ps = np.abs(np.sum(np.exp(1j * phi), 1) / n)

        #- Apply phase stack
        for k in range(p.Nwindows):
            cct[:, k] = cct[:, k] * ps

    #- Return to frequency domain. ------------------------------------------------

    if p.process_causal_acausal_average == 1 or p.process_correlation_normalisation == 1 or p.process_phase_weighted_stack == 1:

        ccf = np.fft.fft(cct, axis=0)

    #==============================================================================
    #- Clean up and return.
    #==============================================================================

    #- Return.
    return ccf
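
# A standalone numpy sketch of the phase-weighted stack applied above; note
# that the standard formulation normalises the phase sum by the number of
# windows (illustrative, synthetic data):
import numpy as np
from scipy import signal

traces = np.random.randn(1000, 8)               # n samples x N windows
phi = np.angle(signal.hilbert(traces, axis=0))  # instantaneous phases
ps = np.abs(np.mean(np.exp(1j * phi), axis=1))  # phase coherence in [0, 1]
stacked = np.mean(traces, axis=1) * ps          # coherence-weighted stack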
예제 #51
0
def plot_detection(detector_dict=None, specific_plot=None):
    """
    1. Plots intensity readings on array of 'fdtd.LineDetector' as a function of timestep.
    2. Plots time of arrival of pulse at different LineDetector in array.
    Compatible with pulse sources.

    Args:
        detector_dict (dictionary): Dictionary of detector readings, as created by 'fdtd.Grid.save_data()'.
        (optional) specific_plot (string): Plot for a specific axis data. Choose from {"Ex", "Ey", "Ez", "Hx", "Hy", "Hz"}.
    """
    if detector_dict is None:
        raise Exception(
            "Function plotDetection() requires a dictionary of detector readings as 'detector_dict' parameter."
        )
    detectorElement = 0  # cell to consider in each detectors
    maxArray = {}
    plt.ioff()
    plt.close()

    for detector in detector_dict:
        if len(detector_dict[detector].shape) != 3:
            print("Detector '{}' not LineDetector; dumped.".format(detector))
            continue
        if specific_plot is not None:
            if detector[-2] != specific_plot[0]:
                continue
        if detector[-2] == "E":
            plt.figure(0, figsize=(15, 15))
        elif detector[-2] == "H":
            plt.figure(1, figsize=(15, 15))
        for dimension in range(len(detector_dict[detector][0][0])):
            if specific_plot is not None:
                if ["x", "y", "z"].index(specific_plot[1]) != dimension:
                    continue
            # if specific_plot, subplot on 1x1, else subplot on 2x2
            plt.subplot(
                2 - int(specific_plot is not None),
                2 - int(specific_plot is not None),
                dimension + 1 if specific_plot is None else 1,
            )
            hilbertPlot = abs(
                hilbert([x[0][dimension] for x in detector_dict[detector]])
            )
            plt.plot(hilbertPlot, label=detector)
            plt.title(detector[-2] + "(" + ["x", "y", "z"][dimension] + ")")
            if detector[-2] not in maxArray:
                maxArray[detector[-2]] = {}
            if str(dimension) not in maxArray[detector[-2]]:
                maxArray[detector[-2]][str(dimension)] = []
            maxArray[detector[-2]][str(dimension)].append(
                [detector, int(hilbertPlot.argmax())]  # index of the envelope peak
            )

    # Same loop as above, only to add axis labels
    for i in range(2):
        if specific_plot is not None:
            if ["E", "H"][i] != specific_plot[0]:
                continue
        plt.figure(i)
        for dimension in range(len(detector_dict[detector][0][0])):
            if specific_plot is not None:
                if ["x", "y", "z"].index(specific_plot[1]) != dimension:
                    continue
            plt.subplot(
                2 - int(specific_plot is not None),
                2 - int(specific_plot is not None),
                dimension + 1 if specific_plot is None else 1,
            )
            plt.xlabel("Time steps")
            plt.ylabel("Magnitude")
        plt.suptitle("Intensity profile")
    plt.legend()
    plt.show()

    for item in maxArray:
        plt.figure(figsize=(15, 15))
        for dimension in maxArray[item]:
            arrival = bd.numpy(maxArray[item][dimension])
            plt.plot(
                [int(x) for x in arrival.T[1]],
                arrival.T[0],
                label=["x", "y", "z"][int(dimension)],
            )
        plt.title(item)
        plt.xlabel("Time of arrival (time steps)")
        plt.legend()
        plt.suptitle("Time-of-arrival plot")
    plt.show()
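
A hedged usage sketch follows; the fdtd-side calls are assumptions based on the docstring above, not verified API.

# Hypothetical usage, assuming 'grid.save_data()' returns the detector
# dictionary described in the docstring (LineDetector readings of shape
# (timesteps, cells, 3)):
# detector_readings = grid.save_data()
# plot_detection(detector_readings)                      # all E and H components
# plot_detection(detector_readings, specific_plot="Ey")  # one component only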
Example #52
0
def GetEnveloppe(signal):
    analytic_signal = hilbert(signal)
    amplitude_envelope = np.abs(analytic_signal)
    return amplitude_envelope
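
For a quick check, the envelope of an amplitude-modulated tone (standard NumPy/SciPy imports assumed, as in the other examples):

import numpy as np
from scipy.signal import hilbert

t = np.linspace(0, 1, 2000, endpoint=False)
am = (1 + 0.5 * np.cos(2 * np.pi * 3 * t)) * np.sin(2 * np.pi * 100 * t)
env = GetEnveloppe(am)  # tracks 1 + 0.5*cos(2*pi*3*t) away from the edges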
Example #53
0
    t = np.arange(0, n * ts, step=ts)
    sig = sig_ampl * np.sin(2 * np.pi * sig_freq * t)
    carrier_freq = 50
    carrier_amplitude = sig_ampl
    sig_xlim = (0, 0.4)

    phase_modulated = carrier_amplitude * np.sin(2 * np.pi * carrier_freq * t + sig)

    # Cumulative integral of the message signal (needed for FM modulation).
    # The original looped over integrate.simps(sig, dx=t[i]), which rescales
    # the integral of the whole signal instead of integrating up to t[i].
    sig_integrated = integrate.cumtrapz(sig, t, initial=0)

    freq_modulated = carrier_amplitude * np.sin(2 * np.pi * carrier_freq * t + sig_ampl * sig_integrated)

    analytic_signal = hilbert(phase_modulated)
    phase_function = np.unwrap(np.angle(analytic_signal) + np.pi / 2)

    phase_demodulated = phase_function - 2 * np.pi * carrier_freq * t
    # NOTE: identical to the PM result above; FM demodulation would normally
    # differentiate the unwrapped phase instead, e.g. np.gradient(phase_function, ts).
    freq_demodulated = phase_function - 2 * np.pi * carrier_freq * t

    fft_freq = np.fft.fftfreq(n, ts)

    phase_modulated_fft = abs(np.fft.fft(phase_modulated)) / n * 2
    phase_demodulated_fft = abs(np.fft.fft(phase_demodulated)) / n * 2

    freq_modulated_fft = abs(np.fft.fft(freq_modulated)) / n * 2
    freq_demodulated_fft = abs(np.fft.fft(freq_demodulated)) / n * 2

    plot_graphic(t, phase_modulated,
                 xlim=sig_xlim,
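
The snippet is truncated at the plotting call. Stripped to its core, the Hilbert-based phase demodulation it performs looks like this (a self-contained sketch with freshly chosen parameters):

import numpy as np
from scipy.signal import hilbert

fs = 10_000.0
t = np.arange(0, 0.5, 1 / fs)
msg = 0.8 * np.sin(2 * np.pi * 5 * t)        # message phase in radians
fc = 50.0
pm = np.sin(2 * np.pi * fc * t + msg)        # phase-modulated carrier

inst_phase = np.unwrap(np.angle(hilbert(pm)) + np.pi / 2)  # sin -> cos offset
recovered = inst_phase - 2 * np.pi * fc * t  # approximates msg away from edges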
Example #54
0
File: analysis.py Project: mrowan137/amrex
def do_analysis(fname, compname, steps):
    ds = yt.load(fname)

    dt = ds.current_time.to_value() / steps

    # Define 2D meshes
    x = np.linspace(ds.domain_left_edge[0], ds.domain_right_edge[0],
                    ds.domain_dimensions[0]).v
    z = np.linspace(ds.domain_left_edge[ds.dimensionality - 1],
                    ds.domain_right_edge[ds.dimensionality - 1],
                    ds.domain_dimensions[ds.dimensionality - 1]).v
    X, Z = np.meshgrid(x, z, sparse=False, indexing='ij')

    # Compute the theory for envelope
    env_theory = gauss_env(
        +t_c - ds.current_time.to_value(), X, Z) + gauss_env(
            -t_c + ds.current_time.to_value(), X, Z)

    # Read laser field in PIC simulation, and compute envelope
    all_data_level_0 = ds.covering_grid(level=0,
                                        left_edge=ds.domain_left_edge,
                                        dims=ds.domain_dimensions)
    F_laser = all_data_level_0['boxlib', 'Ey'].v.squeeze()
    env = abs(hilbert(F_laser))
    extent = [
        ds.domain_left_edge[ds.dimensionality - 1],
        ds.domain_right_edge[ds.dimensionality - 1], ds.domain_left_edge[0],
        ds.domain_right_edge[0]
    ]

    # Plot results
    plt.figure(figsize=(8, 6))
    plt.subplot(221)
    plt.title('PIC field')
    plt.imshow(F_laser, extent=extent)
    plt.colorbar()
    plt.subplot(222)
    plt.title('PIC envelope')
    plt.imshow(env, extent=extent)
    plt.colorbar()
    plt.subplot(223)
    plt.title('Theory envelope')
    plt.imshow(env_theory, extent=extent)
    plt.colorbar()
    plt.subplot(224)
    plt.title('Difference')
    plt.imshow(env - env_theory, extent=extent)
    plt.colorbar()
    plt.tight_layout()
    plt.savefig(compname, bbox_inches='tight')

    relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
    print("Relative error envelope: ", relative_error_env)
    assert (relative_error_env < relative_error_threshold)

    fft_F_laser = np.fft.fft2(F_laser)

    freq_rows = np.fft.fftfreq(F_laser.shape[0], dt)
    freq_cols = np.fft.fftfreq(F_laser.shape[1], dt)

    pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)

    freq = np.sqrt(freq_rows[pos_max[0]]**2 + freq_cols[pos_max[1]]**2)
    exp_freq = c / wavelength

    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
    print("Relative error frequency: ", relative_error_freq)
    assert (relative_error_freq < relative_error_threshold)
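
A hedged invocation sketch; the plotfile name and step count are placeholders, and `t_c`, `gauss_env`, `wavelength`, `c`, and `relative_error_threshold` must already exist at module scope, as in the original script.

# Hypothetical usage: compare plotfile 'plt00200' (a placeholder name) against
# the Gaussian-envelope theory and write the four-panel figure to disk.
# do_analysis("plt00200", "laser_envelope_comparison.png", steps=200)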
Example #55
0
# From Niko's setup: one channel needs to be multiplied by 1000

proc_data['Sub'] = proc_data['Sub'] * 1000

#%%  OPTIONAL EVA
# Decide whether using ripple detection is OK and, if so, decide the threshold

# get the ripple frequency filtered data
#ripple_filt={}
ripple_env = {}
lp_signal = {}
derivata = {}
for key, value in proc_data.items():
    #ripple_filt[key]=bandpass(value,150,300)
    bandpass_ref = bandpass(value, 0.1, 30, fs)
    lp_signal[key] = np.abs(signal.hilbert(bandpass_ref, axis=0))
    ripple_env[key] = np.abs(signal.hilbert(bandpass(value, 150, 300, fs), axis=0))
    #ripple_env[key]=highpass(ripple_env[key],1)
    derivata[key] = np.diff(bandpass(value, 0.1, 30, fs), axis=0)

# try to play around with threshold of ripple_envelope
base = np.std(ripple_env['Sub'])  # NOTE: change the channel key here if needed
th4 = base * 4
th5 = base * 6  # NOTE: factor 6 despite the name
#x=np.linspace(0,ripple_env['CA3'].shape[0]-1,ripple_env['CA3'].shape[0])
#y4=np.tile(th4,ripple_env['CA3'].shape[0])
#y5=np.tile(th5,ripple_env['CA3'].shape[0])
#plt.plot(ripple_env['CA3']) #!!!!!!!!!!!!!!!!!! change here
plt.figure()
x = np.linspace(0, 6000000 - 1, 6000000)
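
To turn the envelope threshold above into discrete ripple events, a minimal sketch; the single-channel indexing, the `th4` threshold, and the 15 ms minimum duration are assumptions layered on the snippet's names.

import numpy as np

env = ripple_env['Sub'][:, 0]        # assumes a (samples, channels) array
above = env > th4                    # supra-threshold mask
edges = np.diff(above.astype(int))
starts = np.where(edges == 1)[0] + 1
stops = np.where(edges == -1)[0] + 1
min_len = int(0.015 * fs)            # assumed minimum ripple duration
events = [(s0, s1) for s0, s1 in zip(starts, stops) if s1 - s0 >= min_len]
# (events clipped at the array edges are ignored in this sketch)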
Example #56
0
    #          window_hei = np.max(np.abs(hilbert(seistoplot.data[w0:w1])))/norm
    #        #                test_w0 = np.argmin(np.abs(seistoplot.times()-Sdifftime+(-1)))
    #        #                test_w1 = np.argmin(np.abs(seistoplot.times()-Sdifftime-149))
    #        #                test_window_hei = np.max(np.abs(hilbert(seistoplot.data[test_w0:test_w1])))/norm
    #        #                print('Window difference: '+str( (test_window_hei-window_hei)/window_hei))
    #          gca().add_patch(patches.Rectangle((seistoplot.times[w0]-Sdifftime,np.round(seis[0].stats['az'])-window_hei),window_wid,2*window_hei,alpha = 0.2, color = 'red'))  # width height

    w0_ref = np.argmin(
        np.abs(
            seistoplot.times(reftime=seistoplot.stats['eventtime']) -
            align_time + 10))  # index of reference window start; adaptive time window
    w1_ref = np.argmin(
        np.abs(
            seistoplot.times(reftime=seistoplot.stats['eventtime']) -
            align_time - 20))
    A0 = np.max(np.abs(hilbert(seistoplot.data[w0_ref:w1_ref]))) / norm
    #gca().add_patch(patches.Rectangle((seistoplot.timesarray[w0_ref]-align_time,np.round(seis[0].stats['az'])-A0),seistoplot.timesarray[w1_ref]-seistoplot.timesarray[w0_ref],2*A0,alpha = 0.4, color = 'red'))
    threshold = A0  #np.max(seistoplot.data/norm)

    print(threshold)
    if threshold > 0.5 or threshold < 0.05:  # and seis[0].stats['dist']>100) or (threshold<0.1 and seis[0].stats['dist']<100):
        strange_trace.append([s, seis[0].stats['az'], s_name, threshold])

    #plt.text(71,np.round(seis[0].stats['az'])+A0,s_name+' #'+str(s), fontsize = 8)
# Put labels on graphs
print('!!!!!!!!!!!!!!!!!!!Strange Traces:----------------------->>>>>>')
print(strange_trace)
plt.subplot(1, 4, 1)
plt.title(' Sdiff dist < %d' % dist_range_1)
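
The amplitude measurement at the heart of this snippet, isolated into a small helper; the argument names are stand-ins, not the snippet's own variables.

import numpy as np
from scipy.signal import hilbert

def windowed_peak_amplitude(data, times, t0, t1, norm=1.0):
    # peak Hilbert-envelope amplitude of `data` between times t0 and t1
    w0 = np.argmin(np.abs(times - t0))
    w1 = np.argmin(np.abs(times - t1))
    return np.max(np.abs(hilbert(data[w0:w1]))) / norm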
Example #57
0
def hilbert_transform(input_data):
    complex_data = hilbert(input_data, axis=0)
    return complex_data.conj()

# (assignment head below is assumed; it mirrors the 'affective' block that follows.
#  'whole_lfp_bandpassed', used further down, is presumably built the same way
#  earlier in the full script.)
iti_lfp_bandpassed = np.asarray([
                    butter_bandpass_filter(
                        data = iti_array, 
                        lowcut = band[0],
                        highcut = band[1],
                        fs = Fs) \
                                for band in tqdm(band_freqs)])

affective_lfp_bandpassed = np.asarray([
                    butter_bandpass_filter(
                        data = affective_dat.all_lfp_array, 
                        lowcut = band[0],
                        highcut = band[1],
                        fs = Fs) \
                                for band in tqdm(band_freqs)])

# Calculate Hilbert and amplitude
whole_lfp_hilbert = hilbert(whole_lfp_bandpassed)
iti_lfp_hilbert = hilbert(iti_lfp_bandpassed)
affective_lfp_hilbert = hilbert(affective_lfp_bandpassed)

whole_lfp_amplitude = np.abs(whole_lfp_hilbert)
iti_lfp_amplitude = np.abs(iti_lfp_hilbert)
affective_lfp_amplitude = np.abs(affective_lfp_hilbert)


# Plot to make sure it's working
tmax = 100000
plt.plot(whole_lfp_bandpassed[0,0,:tmax])
plt.plot(whole_lfp_amplitude[0,0,:tmax])
plt.show()

# Downsample amplitude to make it more amenable to plotting
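
`butter_bandpass_filter` is called above but not defined in this snippet; a common SciPy recipe that matches the call signature would be the following (an assumption, not the project's own definition):

from scipy.signal import butter, filtfilt

def butter_bandpass_filter(data, lowcut, highcut, fs, order=4):
    nyq = 0.5 * fs
    b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
    return filtfilt(b, a, data)  # zero-phase filtering along the last axis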
Example #59
0
File: ambiguity.py Project: jaidevd/pytftb
def wide_band(signal, fmin=None, fmax=None, N=None):
    if 1 in signal.shape:
        signal = signal.ravel()
    elif signal.ndim != 1:
        raise ValueError("The input signal should be one dimensional.")
    s_ana = hilbert(np.real(signal))
    nx = signal.shape[0]
    m = int(np.round(nx / 2.0))
    t = np.arange(nx) - m
    tmin = 0
    tmax = nx - 1
    T = tmax - tmin

    # determine default values for fmin, fmax
    if (fmin is None) or (fmax is None):
        STF = np.fft.fftshift(s_ana)
        sp = np.abs(STF[:m])**2
        maxsp = np.amax(sp)
        f = np.linspace(0, 0.5, m + 1)
        f = f[:m]
        # np.nonzero(...)[0] stands in for the snippet's undefined '_find' helper
        indmin = np.nonzero(sp > maxsp / 100.0)[0].min()
        indmax = np.nonzero(sp > maxsp / 100.0)[0].max()
        if fmin is None:
            fmin = max([0.01, 0.05 * np.fix(f[indmin] / 0.05)])
        if fmax is None:
            fmax = 0.05 * np.ceil(f[indmax] / 0.05)
    B = fmax - fmin
    R = B / ((fmin + fmax) / 2.0)
    nq = np.ceil((B * T * (1 + 2.0 / R) * np.log(
        (1 + R / 2.0) / (1 - R / 2.0))) / 2.0)  # NOQA
    nmin = nq - (nq % 2)
    if N is None:
        N = int(2 ** np.ceil(np.log2(nmin)))  # next power of two (inlines the undefined 'nextpow2')

    # geometric sampling for the analyzed spectrum
    k = np.arange(1, N + 1)
    q = (fmax / fmin)**(1.0 / (N - 1))
    geo_f = fmin * (np.exp((k - 1) * np.log(q)))
    tfmatx = -2j * np.dot(t.reshape(-1, 1), geo_f.reshape(1, -1)) * np.pi
    tfmatx = np.exp(tfmatx)
    S = np.dot(s_ana.reshape(1, -1), tfmatx)
    S = np.tile(S, (nx, 1))
    Sb = S * tfmatx

    tau = t
    S = np.c_[S, np.zeros((nx, N))].T
    Sb = np.c_[Sb, np.zeros((nx, N))].T

    # mellin transform computation of the analyzed signal
    p = np.arange(2 * N)
    coef = np.exp(p / 2.0 * np.log(q))
    mellinS = np.fft.fftshift(np.fft.ifft(S[:, 0] * coef))
    mellinS = np.tile(mellinS, (nx, 1)).T

    mellinSb = np.zeros((2 * N, nx), dtype=complex)
    for i in range(nx):
        mellinSb[:, i] = np.fft.fftshift(np.fft.ifft(Sb[:, i] * coef))

    k = np.arange(1, 2 * N + 1)
    scale = np.logspace(np.log10(fmin / fmax), np.log10(fmax / fmin), N)
    theta = np.log(scale)
    mellinSSb = mellinS * np.conj(mellinSb)

    waf = np.fft.ifft(mellinSSb, N, axis=0)
    no2 = int((N + N % 2) / 2.0)
    waf = np.r_[waf[no2:(N + 1), :], waf[:no2, :]]

    # normalization
    s = np.real(s_ana)
    SP = np.fft.fft(hilbert(s))
    indmin = int(1 + np.round(fmin * (nx - 2)))
    indmax = int(1 + np.round(fmax * (nx - 2)))
    sp_ana = SP[(indmin - 1):indmax]
    waf *= (np.linalg.norm(sp_ana)**2) / waf[no2 - 1, m - 1] / N

    return waf, tau, theta
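
A quick usage sketch for `wide_band`; the chirp and band limits are arbitrary choices.

import numpy as np

n = 128
t = np.arange(n)
chirp = np.cos(2 * np.pi * (0.1 * t + 0.15 * t**2 / n))  # ~0.1 -> 0.4 cycles/sample
waf, tau, theta = wide_band(chirp, fmin=0.05, fmax=0.45)
# waf is the wide-band ambiguity function on a (log-scale, delay) grid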
def plot_template(idx, db_path_T='template_db_2/', db_path=autodet.cfg.dbpath, CC_comp=False, mv_view=True, show=True):
    font = {'family' : 'serif', 'weight' : 'normal', 'size' : 14}
    plt.rc('font', **font)
    template = autodet.db_h5py.read_template('template{:d}'.format(idx), db_path=db_path+db_path_T)
    if 'loc_uncertainty' not in template.metadata.keys():
        template.metadata['loc_uncertainty'] = 10000.
    sta = list(template.metadata['stations'])
    sta.sort()
    ns = len(sta)
    nc = template.metadata['channels'].size
    if CC_comp:
        CC = np.zeros(ns, dtype=np.float32)
        from scipy.signal import hilbert
        for s in range(ns):
            H = []
            num = np.ones(template.select(station=sta[s])[0].data.size, dtype=np.float32)
            den = 1.
            for c in range(nc):
                H.append(np.abs(hilbert(template.select(station=sta[s])[c].data)))
                if np.var(H[-1]) == 0.:
                    H[-1] = np.ones(len(H[-1]), dtype=np.float32)
                num *= H[-1]
                den *= np.power(H[-1], 3).sum()
            num = num.sum()
            den = np.power(den, 1./3.)
            if den != 0.:
                CC[s] = num/den
    plt.figure('template_{:d}_from_{}'.format(idx, db_path+db_path_T), figsize=(20,12))
    if mv_view:
        data_availability = np.zeros(ns, dtype=bool)
        for s in range(ns):
            sig = 0.
            for tr in template.select(station=template.metadata['stations'][s]):
                sig += np.var(tr.data)
            if np.isnan(sig):
                data_availability[s] = False
            else:
                data_availability[s] = True
        MVs = np.int32(np.float32([template.metadata['s_moveouts'], template.metadata['s_moveouts'], template.metadata['p_moveouts']]) * \
                                  template.metadata['sampling_rate'])
        time = np.arange(template.traces[0].data.size + MVs[0,data_availability].max())
    else:
        time = np.arange(template.traces[0].data.size)
    for s in range(ns):
        for c in range(nc):
            ss = np.where(template.metadata['stations'] == sta[s])[0][0]
            plt.subplot(ns,nc,s*nc+c+1)
            lab = '{}.{}'.format(template.select(station=sta[s])[c].stats['station'],template.select(station=sta[s])[c].stats['channel'])
            if CC_comp:
                lab += ' {:.2f}'.format(CC[s])
            if mv_view:
                id1 = MVs[c,ss]
                id2 = id1 + template.traces[0].data.size
                if data_availability[ss]:
                    plt.plot(time[id1:id2], template.select(station=sta[s])[c].data, label=lab)
                    if c < 2:
                        plt.axvline(int((id1+id2)/2), lw=2, ls='--', color='k')
                    else:
                        plt.axvline(int(id1 + 1. * template.metadata['sampling_rate']), lw=2, ls='--', color='k')
                else:
                    plt.plot(time, np.zeros(time.size), label=lab)
            else:
                plt.plot(time, template.select(station=sta[s])[c].data, label=lab)
            #plt.axvline(time[time.size/2], color='k', ls='--')
            plt.xlim((time[0], time[-1]))
            plt.yticks([])
            plt.xticks([])
            plt.legend(loc='upper left', frameon=False, handlelength=0.1, borderpad=0.)
            if s == ns-1:
                plt.xlabel('Time (s)')
                xpos = np.arange(0, time.size, np.int32(10.*autodet.cfg.sampling_rate))
                xticks = [str(float(X)/autodet.cfg.sampling_rate) for X in xpos]
                plt.xticks(xpos, xticks)
    plt.subplots_adjust(bottom = 0.06, top = 0.94, hspace = 0.04, wspace = 0.12)
    plt.suptitle('Template {:d}, location: lat {:.2f}, long {:.2f}, depth {:.2f}km ($\Delta r=${:.2f}km)'\
                   .format(template.metadata['template_idx'], \
                           template.metadata['latitude'], \
                           template.metadata['longitude'], \
                           template.metadata['depth'], \
                           template.metadata['loc_uncertainty']), fontsize=24)
    if show:
        plt.show()
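
A hedged invocation example; the template index is a placeholder and the database paths fall back to the defaults in the signature.

# Hypothetical usage: plot template 12 with per-station coherence values and
# moveout-aligned traces, reading from the default database locations.
# plot_template(12, CC_comp=True, mv_view=True)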