Example #1
def my_hht(data):
    hilbert_out = []
    X = np.sum(data, axis=1)
    X = X / len(data[0])
    plt.figure("hht_8")
    # X_0 = hhtu.boundary_conditions(X, np.arange(len(X)))

    # CEEMDAN decomposition: yields the IMFs (intrinsic mode functions); all
    # parameters can be left at their defaults. CEEMDAN improves on EEMD in
    # that the added noise is chosen automatically for the data at hand, at
    # the cost of a slightly longer run time.
    imfs = pyeemd.ceemdan(X)

    # plot the waveforms of all IMFs after the CEEMDAN decomposition
    # plot_imfs(imfs, plot_splines=False)

    # plot the Hilbert transform of each IMF (the last row is skipped)
    plot_num = int(np.ceil((len(imfs) - 1) / 2))
    for i in range(len(imfs) - 1):
        hilbert_1 = hilbert(imfs[i])
        hilbert_out.append(hilbert_1)
        plt.subplot(plot_num, 2, i + 1)
        print("hht_" + str(i))
        plt.plot(hilbert_1, label="hht_" + str(i))
        plt.legend(loc='best',
                   frameon=False,
                   bbox_to_anchor=(0.5, 0.5),
                   ncol=3)
        plt.ylabel("amplitude")
    plt.show()
    return hilbert_out
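A note that applies to most snippets on this page: scipy.fftpack.hilbert returns a real array (the Hilbert transform of a zero-mean periodic sequence, with the DC term zeroed), not the complex analytic signal that scipy.signal.hilbert returns. That is why these examples build the analytic signal as x + 1j*hilbert(x) and the envelope as sqrt(x**2 + hilbert(x)**2). A minimal self-contained sketch (tone frequency and length chosen arbitrarily):

import numpy as np
from scipy import fftpack

t = np.linspace(0, 1, 1024, endpoint=False)
x = np.sin(2 * np.pi * 5 * t)            # zero-mean, periodic test tone
h = fftpack.hilbert(x)                   # real-valued Hilbert transform
envelope = np.sqrt(x**2 + h**2)          # ~1.0 everywhere for a unit-amplitude tone
print(envelope.min(), envelope.max())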
Example #2
 def test_random_odd(self):
     for n in [33, 65, 55]:
         f = random((n, ))
         af = sum(f, axis=0) / n
         f = f - af
         assert_almost_equal(sum(f, axis=0), 0.0)
         assert_array_almost_equal(ihilbert(hilbert(f)), f)
         assert_array_almost_equal(hilbert(ihilbert(f)), f)
Example #3
 def test_random_odd(self):
     for n in [33,65,55]:
         f = random((n,))
         af = sum(f,axis=0)/n
         f = f-af
         assert_almost_equal(sum(f,axis=0),0.0)
         assert_array_almost_equal(ihilbert(hilbert(f)),f)
         assert_array_almost_equal(hilbert(ihilbert(f)),f)
Example #4
 def test_definition(self):
     for n in [16, 17, 64, 127]:
         x = arange(n) * 2 * pi / n
         y = hilbert(sin(x))
         y1 = direct_hilbert(sin(x))
         assert_array_almost_equal(y, y1)
         assert_array_almost_equal(hilbert(sin(2 * x)),
                                   direct_hilbert(sin(2 * x)))
Example #5
 def test_definition(self):
     for n in [16,17,64,127]:
         x = arange(n)*2*pi/n
         y = hilbert(sin(x))
         y1 = direct_hilbert(sin(x))
         assert_array_almost_equal(y,y1)
         assert_array_almost_equal(hilbert(sin(2*x)),
                                   direct_hilbert(sin(2*x)))
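These tests check hilbert against direct_hilbert, the naive frequency-domain reference used in SciPy's fftpack test suite. A sketch of the identity involved, assuming scipy.fftpack's documented sign convention (each Fourier coefficient is multiplied by 1j*sign(frequency), the opposite sign to the textbook definition, and the DC term maps to zero):

import numpy as np
from scipy import fftpack

def naive_hilbert(x):
    # multiply each Fourier coefficient by 1j*sign(frequency); DC goes to zero
    w = np.fft.fftfreq(len(x))
    return np.fft.ifft(1j * np.sign(w) * np.fft.fft(x)).real

x = np.sin(np.arange(64) * 2 * np.pi / 64)
print(np.allclose(naive_hilbert(x), fftpack.hilbert(x)))   # True for this zero-mean tone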
Example #6
def xcorrComplex(a, b):
    a = fftpack.hilbert(a) * 1j + a
    b = fftpack.hilbert(b) * 1j + b
    la = a.size
    lb = b.size
    c = np.zeros(la - lb + 1).astype(complex)
    for i in range(la - lb + 1):
        tc = (a[i:(i + lb)] * b[0:(0 + lb)].conj()).sum()
        c[i] = tc
    return c
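A hedged usage sketch for xcorrComplex (data and names invented here): correlating a random signal against a 200-sample slice of itself; the magnitude of the complex correlation is expected to peak at the lag the slice was taken from.

import numpy as np
from scipy import fftpack

rng = np.random.default_rng(0)
a = rng.standard_normal(512)
b = a[100:300].copy()
c = xcorrComplex(a, b)
print(np.argmax(np.abs(c)))    # expected: 100, the lag of the template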
Example #7
def calc_phase(real_resp, real_torque):
    """Calculate the phase difference in radians between the displacement and 
    the torque using a Hilbert transform. Signal MUST be monochromatic, 
    so filter first if necessary.
    :param real_resp: The displacement, as an array.
    :param real_torque: The analytic torque, as an array."""
    # Make sure y and torque are complex and not absolute-valued.
    hil_y = f.hilbert(real_resp)
    hil_torque = f.hilbert(real_torque)
    print(np.array([hil_y, hil_torque]))
    phases = -np.angle(hil_y / hil_torque)  # also calculate average and error.
    return h.combine_quantities(phases, operation='mean')
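A self-contained sketch of the same idea with scipy.fftpack names (f.hilbert and h.combine_quantities above are the project's own helpers): two tones with a known 0.7 rad offset; the angle between their analytic signals recovers the offset, with a sign that depends on the Hilbert convention in use.

import numpy as np
from scipy import fftpack

t = np.linspace(0, 1, 4096, endpoint=False)
a = np.sin(2 * np.pi * 8 * t)
b = np.sin(2 * np.pi * 8 * t - 0.7)          # b lags a by 0.7 rad
za = a + 1j * fftpack.hilbert(a)
zb = b + 1j * fftpack.hilbert(b)
print(np.abs(np.median(np.angle(za / zb))))  # ~0.7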
Example #8
def signal_IQ(signal):
    '''transform signal to IQ'''
    Q = np.zeros(signal.shape)
    for i in range(signal.shape[0]):
        Q[i, :] += fftpack.hilbert(signal[i, :])
    IQ = np.hstack((signal, Q))
    return IQ
Example #9
def analytic_signal(x):
    """ A short-cut assuming that the incoming signal is reasonable
    e.g. fairly pure sinusoid.
    So far this has no strategy to minimize fourier transform time.
    """ 
    x = x - np.mean(x)
    return(x+1j*hilbert(x))
Example #10
    def envelope(self):
        '''
        Computes the envelope of the trace. 
        '''
        from scipy.fftpack import hilbert

        self.values = 20 * np.log10(np.abs(hilbert(self.values)))
Example #11
 def envelop(self, data, fs, low_cutoff, high_cutoff):
     filtered_data = self.filter_data(data, fs, low_cutoff, high_cutoff)
     hx = hilbert(filtered_data)
     x = np.sqrt(filtered_data**2 + hx**2)
     x = x - np.mean(x)
     fre, am = self.fourier_transform(x, fs)
     return fre, am, x
Example #12
def get_period(signal, signal_sr):
    """Extract the period from the the provided signal

    :param signal: the signal to extract the period from
    :type signal: numpy.ndarray
    :param signal_sr: the sampling rate of the input signal
    :type signal_sr: integer
    :return: a vector containing the signal period
    :rtype: numpy.ndarray
    """

    # perform a sanity check
    if signal is None:
        raise ValueError("Input signal cannot be None")

    # transform the signal to the hilbert space
    hy = hilbert(signal)

    ey = np.sqrt(signal**2 + hy**2)
    min_time = 1.0 / signal_sr
    tot_time = len(ey) * min_time
    pow_ft = np.abs(fft(ey))
    peak_freq = pow_ft[3:int(len(pow_ft) / 2)]
    peak_freq_pos = peak_freq.argmax()
    peak_freq_val = 2 * pi * (peak_freq_pos + 2) / tot_time
    period = 2 * pi / peak_freq_val

    return np.array([period])
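A hedged usage sketch calling get_period above on a synthetic amplitude-modulated tone (sampling rate and modulation chosen arbitrarily). Note that the spectrum slice starts at bin 3 while the peak index is rebuilt with "+ 2", so the estimate appears to land one frequency bin below the true modulation frequency:

import numpy as np
from numpy import pi
from numpy.fft import fft
from scipy.fftpack import hilbert

sr = 1000
t = np.arange(0, 2.0, 1.0 / sr)
sig = (1 + 0.5 * np.cos(2 * pi * 4 * t)) * np.sin(2 * pi * 50 * t)
print(get_period(sig, sr))   # ~[0.286] here, versus the 0.25 s modulation period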
Example #13
def execute(context):

    if context.data is not None:
        context.data = np.sqrt(context.data**2 +
                               fftpack.hilbert(context.data)**2)

    context.prev = __name__
Example #14
def analytic_signal(x):
    """ A short-cut assuming that the incoming signal is reasonable
    e.g. fairly pure sinusoid.
    So far this has no strategy to minimize fourier transform time.
    """
    x = x - np.mean(x)
    return (x + 1j * hilbert(x))
Example #15
 def bench_random(self):
     print
     print ' Hilbert transform of periodic functions'
     print '========================================='
     print ' size  | optimized |    naive'
     print '-----------------------------------------'
     for size,repeat in [(100,1500),(1000,300),
                         (256,1500),
                         (512,1000),
                         (1024,500),
                         (2048,200),
                         (2048*2,100),
                         (2048*4,50),
                         ]:
         print '%6s' % size,
         sys.stdout.flush()
         x = arange (size)*2*pi/size
         if size<2000:
             f = sin(x)*cos(4*x)+exp(sin(3*x))
         else:
             f = sin(x)*cos(4*x)
         assert_array_almost_equal(hilbert(f),direct_hilbert(f))
         print '| %9.2f' % measure('hilbert(f)',repeat),
         sys.stdout.flush()
         print '| %9.2f' % measure('direct_hilbert(f)',repeat),
         sys.stdout.flush()
         print ' (secs for %s calls)' % (repeat)
Example #16
 def bench_random(self):
     print()
     print(' Hilbert transform of periodic functions')
     print('=========================================')
     print(' size  | optimized |    naive')
     print('-----------------------------------------')
     for size,repeat in [(100,1500),(1000,300),
                         (256,1500),
                         (512,1000),
                         (1024,500),
                         (2048,200),
                         (2048*2,100),
                         (2048*4,50),
                         ]:
         print('%6s' % size, end=' ')
         sys.stdout.flush()
         x = arange(size)*2*pi/size
         if size < 2000:
             f = sin(x)*cos(4*x)+exp(sin(3*x))
         else:
             f = sin(x)*cos(4*x)
         assert_array_almost_equal(hilbert(f),direct_hilbert(f))
         print('| %9.2f' % measure('hilbert(f)',repeat), end=' ')
         sys.stdout.flush()
         print('| %9.2f' % measure('direct_hilbert(f)',repeat), end=' ')
         sys.stdout.flush()
         print(' (secs for %s calls)' % (repeat))
Example #17
    def get_iprobe(self, leakage=None, t_comp=None):
        """ main purpose is to subtract leakage currents
        Will use the full data, as the t_range is meant to be plasma interval
        returns a copy of the measured current, overwritten with the
        corrected iprobe
        """
        # obtain leakage estimate
        if t_comp is None:
            t_comp = self.t_comp
        FFT_size = nice_FFT_size(len(self.imeasfull.timebase), -1)
        self.iprobefull = self.imeasfull.copy()
        self.sweepQ = []  # will keep these for synchronous sampling
        input_leakage = leakage
        for (c, chan) in enumerate(self.imeasfull.channels):
            if self.select is not None and c not in self.select:
                continue
            leakage = input_leakage
            cname = chan.config_name
            sweepV = self.vcorrfull.signal[self.vlookup[self.vassoc[c]]][0:FFT_size]
            sweepQ = hilbert(sweepV)
            self.sweepQ.append(sweepQ)  # save for synchronising segments (it is smoothed)

            # these attempts to make it accept a single channel are only partial
            imeas = self.imeasfull.signal[c] # len(self.imeasfull.channels) >1 else self.imeasfull.signal
            tb = self.imeasfull.timebase

            w_comp = np.where((tb>=t_comp[0]) & (tb<=t_comp[1]))[0]
            if len(w_comp) < 2000:

                raise ValueError('Not enough points {wc} t_comp - try {tt}'
                    .format(tt=np.round([tb[0], tb[0] + t_comp[1]-t_comp[0]],3),
                            wc=len(w_comp)))
            ns = len(w_comp)
            wind = np.blackman(ns)
            offset = np.mean(wind * imeas[w_comp])/np.mean(wind)
            sweepVFT = np.fft.fft(AC(sweepV[w_comp]) * wind)
            imeasFT = np.fft.fft(AC(imeas[w_comp]) * wind)
            ipk = np.argmax(np.abs(sweepVFT)[0:ns//2])  # avoid the upper one
            comp = imeasFT[ipk]/sweepVFT[ipk]

            #print('leakage compensation factor = {r:.2e} + j{i:.2e}'
            #      .format(r=np.real(comp), i=np.imag(comp)))
            print('{u}sing computed leakage comp factor = {m:.2e} e^{p:.2f}j'
                  .format(u = ["Not u", "U"][leakage is None],
                          m=np.abs(comp), p=np.angle(comp)))
            if leakage is None:
                leakage = [np.real(comp), np.imag(comp)]

            # find the common length - assuming they start at the same time????
            comlen = min(len(self.imeasfull.timebase),len(self.vmeasfull.timebase),len(sweepQ))
            # put signals back into rdata (original was copied by reduce_time)
            # overwrite - is this OK?
            self.iprobefull.signal[c] = self.iprobefull.signal[c]*0.  # clear it
            # sweepV has a DC component! beware
            self.iprobefull.signal[c][0:comlen] = self.imeasfull.signal[c][0:comlen]-offset \
                                        - sweepV[0:comlen] * leakage[0] - sweepQ[0:comlen] * leakage[1]
            # remove DC cpt (including that from the compensation sweepV)
            offset = np.mean(wind * self.iprobefull.signal[c][w_comp])/np.mean(wind)
            self.iprobefull.signal[c][0:comlen] -= offset
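The core of the leakage estimate above, reduced to a stand-alone sketch (AC(), nice_FFT_size() and the pyfusion data objects are project-specific; the variable names and the synthetic leakage below are invented): the complex compensation factor is the ratio of the current FFT to the sweep-voltage FFT at the sweep's dominant bin.

import numpy as np
from scipy.fftpack import hilbert

t = np.arange(4096) / 4096.0
sweepV = np.sin(2 * np.pi * 10 * t)
imeas = 0.02 * sweepV + 0.005 * hilbert(sweepV)   # synthetic in-phase + quadrature leakage
wind = np.blackman(len(t))
sweepVFT = np.fft.fft(sweepV * wind)
imeasFT = np.fft.fft(imeas * wind)
ipk = np.argmax(np.abs(sweepVFT)[:len(t) // 2])   # dominant bin in the lower half
comp = imeasFT[ipk] / sweepVFT[ipk]
print(abs(comp), np.angle(comp))   # |comp| ~ 0.0206; the phase reflects the quadrature part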
Example #18
def get_envelop(data):
    """
	using Hilbert for envelop detection:
	data: data to be analysed
	"""
    hilbert_transform = hilbert(data)
    envelop = np.sqrt(data**2 + hilbert_transform**2)
    return envelop
Example #19
def feature_rj(y):         # [feature1, f2, f3] = rj(noise_bpsk, fs)
    h = fftpack.hilbert(y)   # Hilbert transform
    z = np.sqrt(y**2 + h**2)   # envelope
    m2 = np.mean(z**2)     # second moment of the envelope
    m4 = np.mean(z**4)     # fourth moment of the envelope
    r = abs((m4-m2**2)/m2**2)
    Ps = np.mean(y**2)/2
    j = abs((m4-2*m2**2)/(4*Ps**2))
    return (r,j)
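A quick sanity check calling feature_rj above (tone parameters arbitrary): a pure tone has a constant envelope, so the fluctuation feature r should come out at essentially zero.

import numpy as np
from scipy import fftpack

y = np.sin(np.arange(2048) * 2 * np.pi * 32 / 2048)   # unit tone, constant envelope
r, j = feature_rj(y)
print(r, j)   # r ~ 0 because the envelope does not fluctuate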
Example #20
 def test_tilbert_relation(self):
     for n in [16,17,64,127]:
         x = arange(n)*2*pi/n
         f = sin(x)+cos(2*x)*sin(x)
         y = hilbert(f)
         y1 = direct_hilbert(f)
         assert_array_almost_equal(y,y1)
         y2 = tilbert(f,h=10)
         assert_array_almost_equal(y,y2)
Example #21
 def test_tilbert_relation(self):
     for n in [16, 17, 64, 127]:
         x = arange(n) * 2 * pi / n
         f = sin(x) + cos(2 * x) * sin(x)
         y = hilbert(f)
         y1 = direct_hilbert(f)
         assert_array_almost_equal(y, y1)
         y2 = tilbert(f, h=10)
         assert_array_almost_equal(y, y2)
Example #22
def emd(signal):
    emd_cls = EEMD()
    imfs = emd_cls(signal)
    vkur = np.zeros(len(signal))
    temp = [imf for imf in imfs if stats.kurtosis(imf) > 0]
    for imf in temp:
        vkur += imf
    hbSignal = abs(fftpack.hilbert(vkur))

    return hbSignal
Example #23
def frequency_estimate_ml(x):
    x_ = scifft.hilbert(x)
    x_ = noiseWhite(x_)
    datalen = len(x_)

    for i in range(0, datalen):
        x_[i] = np.math.log(abs(x_[i]))/(i+1)
    averge_ = np.average(x_)
    result_ = abs(averge_)
    return result_
Example #24
def hilb(x):
    xsz = x.size
    lxsz = math.log(xsz) / math.log(2)
    if (lxsz != int(lxsz)):
        # to resize, x must be a local copy of the externally referenced array
        x = x.copy()
        x.resize(2**(int(lxsz) + 1))
    H = hilbert(x)
    H.resize(xsz)  # only needed if stretched
    return [abs(H), angle(H)]
Example #25
 def test_random_even(self):
     for n in [32, 64, 56]:
         f = random((n, ))
         af = sum(f, axis=0) / n
         f = f - af
         # zeroing Nyquist mode:
         f = diff(diff(f, 1), -1)
         assert_almost_equal(sum(f, axis=0), 0.0)
         assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)), f)
         assert_array_almost_equal(hilbert(ihilbert(f)), f)
Example #26
 def test_random_even(self):
     for n in [32,64,56]:
         f = random((n,))
         af = sum(f,axis=0)/n
         f = f-af
         # zeroing Nyquist mode:
         f = diff(diff(f,1),-1)
         assert_almost_equal(sum(f,axis=0),0.0)
         assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)
         assert_array_almost_equal(hilbert(ihilbert(f)),f)
Example #27
def frequency_estimate_ml(x):
    x_ = scifft.hilbert(x)
    x_ = noiseWhite(x_)
    datalen = len(x_)

    for i in range(0, datalen):
        x_[i] = np.math.log(abs(x_[i])) / (i + 1)
    averge_ = np.average(x_)
    result_ = abs(averge_)
    return result_
Example #28
def Get_R_parallel(num, data):
    R_matrix = numpy.zeros([num, num], complex)
    p = Pool(4)
    for v in range(num):
        hilbert_v = p.starmap(complex,
                              zip(data[:, v], -fftpack.hilbert(data[:, v])))
        R_matrix[v, :] = p.map(numpy.mean,
                               [hilbert_v * data[:, l] for l in range(num)])
    p.close()
    p.join()
    return R_matrix
Example #29
def hilbert_transform(imfs, dt):
    H = []
    F = []
    for imf in imfs:
        hx = fftpack.hilbert(imf)
        amplitude = np.sqrt(imf**2 + hx**2)[1:]
        dw, extend_w = extend_arctan(hx, imf)
        frequence = np.round(dw / dt / (2 * np.pi)).astype(int)
        H.append(amplitude)
        F.append(frequence[1:])
    hh = hh_spectrum(H, F)
    return hh
Example #30
def lock_in_process(S1, S2):
	if len(S1) != len(S2):
		raise ValueError('Data sets lengths mismatch!')
	if len(S1) < 100:
		raise ValueError('Minimum number of points is 100')
	S1 = np.array(S1)
	S2 = np.array(S2)
	Squad = fftpack.hilbert(S1) # Quadrature signal
	Sdf = np.dot(S1, S2)
	Sdc = np.dot(Squad, S2)
	phi = np.arctan(Sdc/Sdf)
	return phi
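A hedged check of lock_in_process with synthetic data (names invented here): S2 is a reference tone and S1 the same tone shifted by a known 0.4 rad; the arctangent of the two dot products recovers that phase, up to a sign set by the Hilbert convention.

import numpy as np
from scipy import fftpack

n = np.arange(4096)
ref = np.sin(2 * np.pi * 16 * n / 4096)
sig = np.sin(2 * np.pi * 16 * n / 4096 + 0.4)
print(abs(lock_in_process(sig, ref)))   # ~0.4 rad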
Example #31
def plotEnvelope(data):
    pl.subplot(221)
    original_data = np.array(data)
    pl.plot(original_data, label=u"Original_data")
    pl.legend()
    pl.subplot(222)
    hx_original = fftpack.hilbert(original_data)
    envelope1 = np.sqrt(original_data**2 + hx_original**2)
    pl.plot(envelope1, "r", linewidth=2, label=u"Envelope1")
    pl.title(u"Hilbert Transform")
    pl.legend()
    pl.show()
Example #32
    def envelope(self):
        '''
        NEEDS TESTING
        
        Computes the envelope of the traces using the Hilbert transform. 
        '''
        from scipy.fftpack import hilbert

        self.info('Applying Hilbert transform ...')
        for trace in range(self.traces):
            self.data[:, trace] = np.abs(hilbert(self.data[:, trace]))
        self.done()
Example #33
def hilbertEnv(data, step):
    res = np.array([])
    size = data[1].size - step - 1
    print(data[1].size)
    print(size)
    i = 0
    while i < size:
        aux = hilbert(data[1][i:i + step])
        print(data[0][i])
        print(aux)
        res = np.append(res, aux)
        i += 100
    return np.append([range(len(res))], [res], axis=0)
Example #34
def envelop(x_en, display=0):
    x_en_a = x_en - x_en.mean()
    hx_a = fftpack.hilbert(x_en_a)
    x_en_up = np.sqrt(x_en_a**2 + hx_a**2) + x_en.mean()
    # x_en_a_dw = -x_en_a
    # hx_a_dw = fftpack.hilbert(x_en_a_dw)
    # x_en_dw = -np.sqrt(x_en_a_dw**2 + hx_a_dw**2)+ x_en.mean()
    # x_en_mean = (x_en_dw+x_en_up)/2
    if display:
        plt.plot(x_en, "b", linewidth=2, label='signal')
        plt.plot(x_en_up, "r", linewidth=2, label='envelop')
        plt.legend()
        plt.show()
    return x_en_up
Example #35
def analytic_phase(x, t=None, subint=None):
    """ gets the phase from an amazing variety of signals
    http://en.wikipedia.org/wiki/Analytic_signal
    subinterval idea is not debugged and is probably unnecessary
    may shorten data?
    """
    from scipy.fftpack import hilbert
    from pyfusion.utils import fix2pi_skips
    from numpy import zeros, arctan2
    # this subinterval idea does not seem necessary and is not debugged
    if subint != None:
        subint=powerof2(subint)
        nsubints = int(len(x)/subint)
        phs = zeros([nsubints, subint])
        for i in range(nsubints):
            xsub = x[i*subint:(i+1)*subint]
            phs[i,:] = arctan2(xsub, hilbert(xsub))
        phi = phs.flatten()
    else:
        y=hilbert(x)  # use hilbert twice just to remove DC (lazy)
        phi = arctan2(y, hilbert(y))

    return(fix2pi_skips(phi, sign='+'))
Example #36
def envelope(data):
    """
    Envelope of a function.
    Computes the envelope of the given function. The envelope is determined by
    adding the squared amplitudes of the function and it's Hilbert-Transform
    and then taking the square-root. (See [Kanasewich1981]_)
    The envelope at the start/end should not be taken too seriously.
    :type data: numpy.ndarray
    :param data: Data to make envelope of.
    :return: Envelope of input data.
    """
    hilb = hilbert(data)
    data = (data ** 2 + hilb ** 2) ** 0.5
    return data
Example #37
def analytic_phase(x, t=None, subint=None):
    """ gets the phase from an amazing variety of signals
    http://en.wikipedia.org/wiki/Analytic_signal
    subinterval idea is not debugged and is probably unnecessary
    may shorten data?
    """
    from scipy.fftpack import hilbert
    from pyfusion.utils import fix2pi_skips
    from numpy import zeros, arctan2
    # this subinterval idea does not seem necessary and is not debugged
    if subint != None:
        subint=powerof2(subint)
        nsubints = int(len(x)/subint)
        phs = zeros([nsubints, subint])
        for i in range(nsubints):
            xsub = x[i*subint:(i+1)*subint]
            phs[i,:] = arctan2(xsub, hilbert(xsub))
        phi = phs.flatten()
    else:
        y=hilbert(x)  # use hilbert twice just to remove DC (lazy)
        phi = arctan2(y, hilbert(y))

    return(fix2pi_skips(phi, sign='+'))
Example #38
def GetSingal(context)->int:
    
    price_df = history(count=g.N + 1, unit='1d', field='close', security_list=g.index_code)
    
    # denoise yesterday's closing prices
    denoised_price = wave_transform(price_df[g.index_code],'db4','sym',4,1,4)
    
    diff_price = denoised_price.diff()
    diff_price = diff_price.dropna()
    
    # Hilbert cycle, computed on a rolling window to avoid look-ahead bias
    hilbert = fftpack.hilbert(diff_price)
    
    return np.sign(hilbert)[-1]  # 1 = hold the position, otherwise stay flat
Example #39
def compressed_sensing_1VD( fid, mask, num_iter=500, factor=0.95, tol = 0.01, maxPeaks=2 ):
    
    sss = numpy.zeros( len(fid))
    sss0 = numpy.zeros( len(fid))
    
    final_iter_value  = 0
    final_tol_value   = 0
    final_numpeaks_value = 0

    fid1 = fid.copy()
    tol_diff = (numpy.sqrt(((abs(fid1)).sum())/32.))
    
    k=0
    kkk = []
    rrr = fftpack.fft(fid1)
    rss0 = []
    rss0.append(abs(rrr).sum())
    
    tol0 = abs(rrr).sum()
    tol_diff = ( tol0 - abs(rrr).sum() )*100.0 / tol0
    while (tol_diff < tol) and (k < num_iter) and numPeaks( sss ) <maxPeaks:
        
        sss0 = 1.0*sss
        
        rrr = fftpack.fft(fid1)
        m1 = max(rrr.real)
        
        sss_max_old = sss.max()
               
        for i,r in enumerate(rrr):
            if r.real > m1* factor:
                sss[i] = sss[i]+rrr[i].real
                rrr[i] = complex(m1*factor)
        sss_max = sss.max()
        
        rrr_iii = fftpack.hilbert( rrr.real )
        
        rrr = rrr.real + 1j * rrr_iii
        
        fid1 = fftpack.ifft(rrr)
        
        fid1 *= mask
        tol_diff = ( tol0 - abs(rrr).sum() )*100.0 / tol0
        k +=1

    final_iter_value = k
    final_numpeaks_value = numPeaks(sss)
    final_tol_value = tol_diff
    
    return( sss0, [final_iter_value, final_numpeaks_value, final_tol_value ] )
Example #40
 def reconstruct(self, paData):
     if paData.ndim == 2:
         (nSamples, nSteps) = paData.shape
         paData = np.reshape(paData, (nSamples, nSteps, 1))
     (nSamples, nSteps, zSteps) = paData.shape
     reImg1 = np.copy(super().reconstruct(paData))
     # take 90-degree phase shift
     import scipy.fftpack as spfp
     for z in range(zSteps):
         for n in range(nSteps):
             paData[:, n, z] = spfp.hilbert(paData[:, n, z])
     reImg2 = np.copy(super().reconstruct(paData))
     self.reImg = np.sqrt(reImg1 ** 2 + reImg2 ** 2)
     return self.reImg
Example #41
def Kosambi_Hilbert_phase(X, sampling_rate, passband=None, index=0, moving_window=False):

	y = Kosambi_Hilbert_torsion(X, sampling_rate, passband=passband, index=index, moving_window=moving_window)
	Hy = hilbert(y)

	radius = np.sqrt(y**2+Hy**2)
	phase = np.arctan2(y, Hy)
	phi_u = unmod(phase)

	if phi_u[-1]-phi_u[0] < 0.:	# if phase shrinks, reverse it.
		phase = -phase

	phase = np.mod(phase, pi2)

	return phase, radius
Example #42
def envelope(data):
    """
    Envelope of a function.

    Computes the envelope of the given function. The envelope is determined by
    adding the squared amplitudes of the function and it's Hilbert-Transform
    and then taking the square-root. (See [Kanasewich1981]_)
    The envelope at the start/end should not be taken too seriously.

    :param data: Data to make envelope of, type numpy.ndarray.
    :return: Envelope of input data.
    """
    hilb = hilbert(data)
    data = (data ** 2 + hilb ** 2) ** 0.5
    return data
Example #43
    def compute_envelope(self, amplitude):
        '''
        This method computes the envelope of the waveform. The method used here is described
        in Dr. Skliar's paper entitled "Anisotropic Diffusion Filter for Robust Timing of
        Ultrasound Echoes"
        '''

        #compute the hilbert of amplitude
        h_amp = fft.hilbert(amplitude)

        #compute the envelope "see the method in the paper for more detail."
        A_of_t = (amplitude**2 + h_amp**2)**(1/2)

        #return the envelope
        return A_of_t
Example #44
    def process_data(self):
        self.hilbert.values = fftpack.hilbert(self.emg_bruto.values)
        self.hilbert_retificado.values = np.abs(self.hilbert.values)
        self.envoltoria.values = filtfilt(self.b, self.a,
                                          self.hilbert_retificado.values)
        self.detection_sites = self.envoltoria.values > self.threshold.values[0]

        time_inicio = self.qnt_points - 1
        for n in range(1, self.qnt_points):
            # rising edge (contraction onset)
            if self.detection_sites[n] and not self.detection_sites[n - 1]:
                time_inicio = n  # store the start index of the contraction
            if not self.detection_sites[n] and self.detection_sites[n - 1]:
                time_end = n
                self.contraction_region.setRegion([time_inicio, time_end])
Example #45
def build_single_raw_data(num, channel):
    '''
    :param num: index of the subject file to load
    :param channel: which channel to extract
    :return: the envelope of that channel's signal (via the Hilbert transform)
    '''
    load_data = sio.loadmat(raw_path[num])
    load_maxtrix = load_data['data']
    shape = load_maxtrix.shape
    len = shape[0]
    pre_train = load_maxtrix[0:len, channel]
    pre_train = np.array(pre_train)
    pre_train = np.reshape(pre_train, (-1))
    hx = fftpack.hilbert(pre_train)
    envelop = np.sqrt(pre_train**2 + hx**2)
    return envelop
Example #46
def envelope(x):
    """Computes the envelope of a seismic signal.

    The envelope, e(n), of a signal x(n), is calculated as:

        e(n) = (x(n) ** 2 + h(n) ** 2) ** 0.5

    where h(n) is the Hilbert Transform of x(n)

    Args:
        x: array of data.

    Returns:
        out: Envelope of x, numpy array type.
    """
    x_mean = x.mean()
    x_norm = x - x_mean
    return ((x_norm ** 2 + fftpack.hilbert(x_norm) ** 2) ** 0.5) + x_mean
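The mean subtraction matters because scipy.fftpack.hilbert zeroes the DC Fourier coefficient, so an offset signal would otherwise get a distorted envelope. A usage sketch calling the envelope function above (offset and tone chosen arbitrarily):

import numpy as np
from scipy import fftpack

t = np.arange(2048) / 2048.0
x = 2.0 + 0.5 * np.sin(2 * np.pi * 20 * t)
print(envelope(x).mean())   # ~2.5: the 2.0 offset plus the 0.5 tone amplitude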
Example #47
def _Kosambi_Hilbert_torsion(X, Filter, index=0):
	# X[time, channel] should be X_j(t_i)

	if X.shape[1] == 1: return X[:, 0]

	# declarations and initializations
	X = np.asarray(X, dtype=float)
	channels = range(X.shape[1])
	Y = np.zeros((X.shape[0], 2*X.shape[1]+1), float)			# Y[time,  channel], X, and H(X) with Y[:, 0] as the reference channel.
	Yf = np.zeros(Y.shape, float)						# Filter(ed) version of Y.


	X, reference_amplitude = normalize(X, Filter, index=index)
	Y[:, 0] = X[:, index]							# save the reference channel to vector zero
	channels.pop(index)							# reference channel is treated separately.


	for (c, channel) in enumerate(channels):
		Y[:, 1+2*c] = X[:, channel]
		Y[:, 1+2*c+1] = hilbert(Y[:, channel])


	for i in xrange(Y.shape[1]):
		Yf[:, i] = Filter(Y[:, i])


	pcanode = mdp.nodes.PCANode(svd=True)	# this pcanode is used by the function below. (it's actually some static variable)
	pcanode.execute(Yf)			# get the principle components from Yf
	Proj = pcanode.get_projmatrix()		# ...and their projection matrix.

	if Proj[0, 0] < 0: Proj = -Proj		# ... why do I need to do this?


	KHT_component = np.dot(Y, Proj)[:, 0]		# apply them to Y!!!
	pca_amplitude = np.sqrt(signalAndNoise(KHT_component, Filter)[0])

	return reference_amplitude/pca_amplitude * KHT_component 
Example #48
def extract_ts_features(ts, w_length = False, num_of_windows = False, 
                                overlap = False, option = "mean", param = 2):
    # Extract features from time series
    # option = 'mean'      : mean of each window
    #          'median'    : median
    #          'std'       : std
    #          'kurtosis'  : kurtosis
    #          'gmean'     : geometric mean
    #          'hmean'     : harmonic mean
    #          'moment'    : nth moment
    #          'skew'      : skewness
    #          'max'       : max
    #          'min'       : min
    #          'variation' : coefficient of variation
    #          'snr'       : mean divided by std
    #          'sem'       : standard error of the mean
    #          'fft'       : fft
    #          'ifft'      : inverse fft
    #          'rfft'      : fft of real series (complex conjugates discarded)
    #          'psd'       : power spectral density
    #          'dct'       : discrete cosine transform
    #          'hilbert'   : hilbert transform
    #          'relmaxind' : relative maxima indices
    #          'relmax'    : relative maxima values
    #          'relminind' : relative minima indices
    #          'relmin'    : relative minima values
    #          'zerocross' : indices of zero crossing before the crossing
    #          'zcr'       : zero crossing rate
    #
    # Example: extract_ts_features(np.arange(1000), w_length = 154, 
    #                                        overlap = 0.3, option = "mean")
    
    if option == "mean":
        features = np.mean(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "median":
        features = np.median(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "std":
        features = np.std(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "kurtosis":
        features = kurtosis(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "gmean":
        features = gmean(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "hmean":
        features = hmean(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "moment":
        features = moment(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "skew":
        features = skew(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "max":
        features = np.max(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "min":
        features = np.min(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "variation":
        features = variation(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "snr":
        features = signaltonoise(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "sem":
        features = sem(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "fft":
        features = fft(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "ifft":
        features = ifft(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "rfft":
        features = rfft(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "psd": # Fix this!
        features = periodogram(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "dct":
        features = dct(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "hilbert": # Fix this
        features = hilbert(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option in ["relmaxind", "relmax"]:
        windows = sliding_window(ts, w_length = w_length, 
                            num_of_windows = num_of_windows, overlap = overlap)
        features = np.ones((windows.shape[0:2]), dtype=object)
        for i in range(windows.shape[0]):
            for j in range(windows.shape[1]):
                features[i,j] = argrelmax(windows[i,j])[0]
        if option == "relmax":
            for i in range(windows.shape[0]):
                for j in range(windows.shape[1]):
                    features[i,j] = windows[i,j][features[i,j]]
    elif option in ["relminind", "relmin"]:
        windows = sliding_window(ts, w_length = w_length, 
                            num_of_windows = num_of_windows, overlap = overlap)
        features = np.ones((windows.shape[0:2]), dtype=object)
        for i in range(windows.shape[0]):
            for j in range(windows.shape[1]):
                features[i,j] = argrelmin(windows[i,j])[0]
        if option == "relmin":
            for i in range(windows.shape[0]):
                for j in range(windows.shape[1]):
                    features[i,j] = windows[i,j][features[i,j]]
    elif option == "zerocross":
        sign = np.sign(ts).astype(int)
        sign[sign==0] = -1
        sign_change = np.diff(sign)
        features = np.where(sliding_window(sign_change, w_length = w_length, 
                        num_of_windows = num_of_windows, overlap = overlap))
    elif option == "zcr":
        sign = np.sign(ts).astype(int)
        sign[sign==0] = -1
        if len(sign.shape) == 1: # if ts is a vector
            sign_change = np.hstack((np.diff(sign), np.zeros((1)))).astype(int)
        else: #if ts is a matrix
            sign_change = np.hstack((np.diff(sign), 
                                    np.zeros((sign.shape[0],1)))).astype(int)
        
        '''if w_length:
            # w_length - 1 because diff() outputs 1 element shorter
            features = np.sum(np.abs(sliding_window(sign_change, 
            w_length = w_length, num_of_windows = num_of_windows, 
            overlap = overlap)) > 0.5, -1)/float(w_length) 
        else:'''
        windows = sliding_window(sign_change, w_length = w_length, 
                            num_of_windows = num_of_windows, overlap = overlap)
        features = np.sum(np.abs(windows)>0.5, -1)/float(windows.shape[-1])
    else:
        raise ValueError("No such option!")
    
    return features
Example #49
# replace by time overlap, using reduce time.
# always reduce time to make sure a copy is made.
# this reduction is to select the common area of the signals
sweepVmeas = sweep_data.signal
dats = [sweep_data, data]
common_start = max([x.timebase[0] for x in dats])
common_end = min([x.timebase[-1] for x in dats])
# the first reduce time
sweepVmeas_rdata = sweep_data.reduce_time([common_start, common_end])
imeas_rdata = data.reduce_time([common_start, common_end])
tb = imeas_rdata.timebase
print('timebase length = {l:,d}'.format(l=len(tb)))

# first need to shift IMeas relative to sweep if there is an offset.  
# Can use hilbert to shift the sweep instead.
sweepQ = hilbert(sweepVmeas_rdata.signal)
sweepV = sweepVmeas_rdata.signal
if dphi != 0:
    print('Warning - changing sweep phase this way corrupts the DC cpt!!!')
    sweepV = cos(dphi) * AC(sweepVmeas_rdata.signal) - sin(dphi) * sweepQ

imeas = imeas_rdata.signal

w_comp = np.where((tb>=t_comp[0]) & (tb<=t_comp[1]))[0]
ns = len(w_comp)
sweepVFT = np.fft.fft(AC(sweepV[w_comp]) * np.blackman(ns))
imeasFT = np.fft.fft(AC(imeas[w_comp]) * np.blackman(ns))
ipk = np.argmax(np.abs(sweepVFT)[0:ns//2])  # avoid the upper one
comp = imeasFT[ipk]/sweepVFT[ipk]

print('leakage compensation factor = {comp}'.format(comp=comp))
Example #50
def autopick (seismic):
  '''
  Automatically picks the highest-amplitude seismic signal on each trace, and then
  attempts to regularize the output header information by smoothing with a variety of
  B-spline methods.
  '''

  global globalWavelet
  if seismic.controlState != 2:
    return
  trackprogress(seismic)
  outbuf = seismic.trHeadersRec[seismic.userArg]

  sampr = seismic.reelHeaderRec['SAMP_RATE']/1000.

  if (globalWavelet == None):
    sf = segyread.SEGYFile('wavelet.su', isSU=True)
    globalWavelet = sf.readTraces()
    ns = sf.trhead[0]['ns']
    globalWavelet.shape = (ns,)
    del sf

  function = seismic.trData.copy()

  if (method[:3] == 'env'):
    for i, trace in enumerate(function):
      function[i,:] = sqrt(trace**2 + hilbert(trace)**2)
  elif (method[:3] == 'ear'):
    for i, trace in enumerate(function):
      function[i,:] = energyratio(trace, earwindow)

  #seismic.trHeadersRec['ENVELOPE'][:] = envelope
  #seismic.trHeadersRec['XCOR'][:] = xcor
  #seismic.trHeadersRec['XCOR'][:] = function

  offsets = seismic.trHeadersRec['OFFSET'].copy()

  with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    offsets,monot,unmonot = unique1d(offsets,return_index=True,return_inverse=True)
  function = function[monot]

  pickbuf = offsets.copy()
  envpicks = offsets.copy()
  if (method[3:] == 'max'):
    #for i in range(len(function)):
    #  pickbuf[i] = argmax(function[i,:]) 
    pickbuf = argmax(function,axis=1) 
  elif (method[3:] == 'median'):
    #for i in range(len(function)):
    #  pickbuf[i] = median(flipud(argsort(function[i,:]))[:ltr])
    pickbuf = median(fliplr(argsort(function,axis=1))[:,:ltr],axis=1)
  elif (method[3:] == 'slope'):
    pickbuf = argmax(diff(gf(function,slopegparm),axis=1),axis=1)
  pickbuf[:] = sampr*pickbuf

  if (lmovel):
    pickbuf[:] = pickbuf - lmovel*offsets/10

  #try:
  splc = interpolate.splrep(offsets,pickbuf)
  splder = interpolate.splev(offsets,splc,der=1)
  #except ValueError:
  #  outbuf[:] = 0
  #  print "Failed to produce spline!"
  #  return

  splderenv = sqrt(splder**2 + hilbert(splder)**2)
  fp = argwhere(splderenv < derthresh*splderenv.mean())

  # Curve fitting method
  offsub = offsets[fp].reshape((len(fp),))
  pbsub = pickbuf[fp].reshape((len(fp),))

  splc = interpolate.splrep(offsub,pbsub,k=3,s=fitness*len(offsub))
  envpicks = interpolate.splev(offsets,splc)

  splcur = interpolate.splev(offsub,splc,der=2)
  splcurenv = sqrt(splcur**2 + hilbert(splcur)**2)
  keep = argwhere(splcurenv < curthresh*splcurenv.mean())

  offkep = offsub[keep].reshape((len(keep),))
  pbkep = pbsub[keep].reshape((len(keep),))

  splc = interpolate.splrep(offkep,pbkep,k=1,s=fitness*len(offkep))
  mask = zeros(envpicks.shape)
  mask[fp[keep]] = 1
  envpicks = interpolate.splev(offsets,splc) * mask * (abs(offsets)>offexclude)
  envpicks = envpicks * (envpicks > stexclude)

  # Envelope Autopicking Diagnostics
  if (showplot):
    figure()
    subplot(3,1,1)
    plot(offsets,splder,'b-',label='Function')
    plot(offsets,splderenv,'g-',label='Envelope')
    fill_between(offsets,splderenv,alpha=0.5,facecolor='green')
    plot(offsub,ones(len(offsub))*splderenv.mean()*derthresh,'r-', label='Threshold')
    xlabel('Offset (decimetres)')
    ylabel('Amplitude of First Derivative')
    legend(loc=0)

    subplot(3,1,2)
    plot(offsub,splcur,'b-', label='Function')
    plot(offsub,splcurenv,'g-', label='Envelope')
    fill_between(offsub,splcurenv,alpha=0.5,facecolor='green')
    plot(offsub,ones(len(offsub))*splcurenv.mean()*curthresh,'r-', label='Threshold')
    xlabel('Offset (decimetres)')
    ylabel('Amplitude of Second Derivative')
    legend(loc=0)

    subplot(3,1,3)
    plot(offsets,pickbuf,'r.', label='Initial Picks')
    plot(offsets,envpicks,'gx', label='Filtered Picks')
    xlabel('Offset (decimetres)')
    ylabel('Traveltime (ms)')
    legend(loc=0)
  
  # --------------------------------------------------------------------
  # Pick revision via local cross-correlation of traces
  if (refine == 'xcor'):

    envpicks = envpicks / sampr
    stepcor = []
    corenvelope = []
    shifts = []
    shiftor = len(seismic.trData[0])/2
    for i in xrange(len(seismic.trData)-1):
      stepcor.append(correlate(seismic.trData[i],seismic.trData[i+1],mode=1))
      corenvelope.append(gf(real(sqrt(stepcor[-1]**2 + hilbert(stepcor[-1])**2)),5))
      locs = flipud(corenvelope[-1].argsort())[:15]
      shifts.append(mean(corenvelope[-1][locs]*(shiftor-locs)/sum(corenvelope[-1][locs])))

    stepcor = array(stepcor)
    corenvelope = array(corenvelope)
    shifts = array(shifts)

    if (showplot and False):
      figure()
      subplot(3,1,1)
      imshow(stepcor.T)
      subplot(3,1,2)
      imshow(corenvelope.T)
      subplot(3,1,3)
      plot(shifts)

    # Remove spikes
    shifts = mf(shifts,5)
    newshifts = [0.]
    for i in xrange(len(seismic.trData)-1):
      newshifts.append(shifts[i]+newshifts[-1])
    newshifts = array(newshifts)

    fpicks = envpicks.copy()
    bpicks = envpicks.copy()
    newpicks = envpicks.copy()

    goodpicks = argwhere(envpicks != 0)
    
    #for i in xrange(1,len(envpicks)):
    #  if (((abs(fpicks[i] - fpicks[i-1])>jumpthresh) and (fpicks[i-1] != 0)) or (fpicks[i] == 0)):
    #    fpicks[i] = fpicks[i-1]+shifts[i-1]
    #  l = -(i+1)
    #  if (((abs(bpicks[l] - bpicks[l+1])>jumpthresh) and (bpicks[l+1] != 0)) or (bpicks[l] == 0)):
    #    bpicks[l] = bpicks[l+1]-shifts[l+1]
    #newpicks = (bpicks+fpicks)/2

    # Define some clever function on the forward and backward interpolations
    for i in xrange(len(goodpicks)-1):
      for j in xrange(goodpicks[i]+1,goodpicks[i+1]):
        fpicks[j] = fpicks[j-1] + shifts[j-1]
      for j in xrange(goodpicks[i+1]-1,goodpicks[i],-1):
        bpicks[j] = bpicks[j+1] - shifts[j]
      x0 = goodpicks[i]
      x1 = goodpicks[i+1]
      domain = linspace(0,pi/2,x1-x0+1)
      for j in xrange(goodpicks[i]+1,goodpicks[i+1]):
        newpicks[j] = sqrt((fpicks[j]*cos(domain[j-x0]))**2 + (bpicks[j]*sin(domain[j-x0]))**2)

    # Pick the first pick from the forward prediction or the blended version
    #newpicks = newpicks * (newpicks<fpicks) + fpicks * (fpicks<=newpicks)

    if (showplot):
      figure()
      ax = axes()#subplot(1,4,1)
      gray()
      imshow(seismic.trData.T, aspect='auto')
      axt = ax.axis()
      plot(envpicks, 'g.', label='Initial Pick')
      plot(fpicks,'b-', label='Forward Prediction')
      plot(bpicks,'y-', label='Backward Prediction')
      plot(newpicks,'r-', label='Blended')
      ax.axis(axt)
      ylabel('Sample')
      xlabel('Trace')
      title('Picking output overlaid on seismic gather')

    envpicks[:] = newpicks*sampr

  #ninstphase = []
  #finstphase = []
  #binstphase = []
  #for i,trace in enumerate(seismic.trData):
  #  npick = newpicks[unmonot][i]
  #  fpick = fpicks[unmonot][i]
  #  bpick = bpicks[unmonot][i]
  #  ninstphase.append(arctan2(hilbert(trace)[npick],trace[npick]))
  #  finstphase.append(arctan2(hilbert(trace)[fpick],trace[fpick]))
  #  binstphase.append(arctan2(hilbert(trace)[bpick],trace[bpick]))

  #ninstphase = array(ninstphase)
  #finstphase = array(finstphase)
  #binstphase = array(binstphase)

  if (showplot):
  #  figure()
  #  ax = axes()
  #  plot(offsets[unmonot],180*unwrap(ninstphase)/pi,'r.',label='Blended')
  #  plot(offsets[unmonot],gf(mf(180*unwrap(ninstphase)/pi,5),10),'r-',label='Smooth Blended')
  #  plot(offsets[unmonot],180*unwrap(finstphase)/pi,'b.',label='Forwards')
  #  plot(offsets[unmonot],gf(mf(180*unwrap(finstphase)/pi,5),10),'b-',label='Smooth Forwards')
  #  plot(offsets[unmonot],180*unwrap(binstphase)/pi,'y.',label='Backwards')
  #  plot(offsets[unmonot],gf(mf(180*unwrap(binstphase)/pi,5),10),'y-',label='Smooth Backwards')
  #  legend(loc=0)
  #  ylabel('Unwrapped phase (degrees)')
  #  xlabel('Offset (decimetres)')
  #  title('Instantaneous phase with offset (extracted at pick locations)')

  #  axisrange = axis()
  #  ticks = ax.get_yticks()
  #  ticks = range((int(ticks[0])/360 +1)*360,360,360)
  #  ax.set_yticks(ticks)
  #  grid(True)
  #  axis(axisrange)
    show()

  outbuf[:] = envpicks[unmonot]
  offsets = offsets[unmonot]

  if (lmovel):
    outbuf[:] = outbuf + offsets*lmovel/10
Example #51
# -*- coding: utf-8 -*-
from scipy import fftpack
import numpy as np

x = np.random.rand(16)
y = fftpack.hilbert(x)

X = np.fft.fft(x)
Y = np.fft.fft(y)

print np.imag(Y/X)
Example #52
def compressed_sensing_1VD1b( fid, mask, num_iter=500, factor=0.95, maxPeaks=20 ):
    
#    print maxPeaks,num_iter
    sss = numpy.zeros( len(fid))
    sss_prev = numpy.zeros( len(fid))
    
    final_iter_value  = 0
    final_tol_value   = 0
    final_numpeaks_value = 0
    
#    numberOfpeaks = numpy.zeros(num_iter)
    numberOfpeaks = []
    fid1 = fid.copy()
    
    k=0
    rrr = fft(fid1)
    
#    print "Num Peaks", numPeaks( sss )
    
    noise_not_reached = True
#    while  (k < num_iter) and numPeaks( sss[k] ) < maxPeaks and noise_not_reached:

        
    while  noise_not_reached and numPeaks( sss ) <maxPeaks:
        
        sss_prev =  numpy.copy(sss)
        
        rrr = fft(fid1)
        m1 = max(rrr.real)*factor
               
        for i,r in enumerate(rrr):
            if r.real > m1:
                sss[i] = sss[i]+rrr[i].real-m1
                rrr[i] = complex(m1)
        rrr = rrr.real + 1j * hilbert( rrr.real )
        
        fid1 = ifft(rrr)*mask

        
        numberOfpeaks.append( numPeaks( sss ))

#        k +=1
#        print "k, num peaks", k,numberOfpeaks[-1]
        
        npeaks = numpy.array(numberOfpeaks)
    
#        ddd = abs(npeaks-numpy.roll(npeaks,-1))

#        ddd1 = ddd[:]-numpy.roll(ddd,-1)
#        ppp = (numpy.where(ddd[:-2]>0))[0]

#        ppp1 = ppp[1:]-ppp[:-1]
        
        noise_not_reached = True
#        print "ppp,ppp1",ppp,ppp1
        if k>=3:
            ddd = abs(npeaks[1:]-npeaks[:-1])
            ppp = (numpy.where(ddd>0))[0]
            ppp1 = ppp[1:]-ppp[:-1]
            for j,v in enumerate(ppp1):


                if v < 1000:
                    noise_not_reached = False 
                    print "reached noise level",v,npeaks[-1],k
                    break

        k += 1
        if k >  num_iter:
            print "reached max iterations"
            noise_not_reached = False
        
    final_iter_value = k

#    print k,
                    
    return( sss_prev, [final_iter_value, numberOfpeaks[-1] ] )
Example #53
def compressed_sensing_1VD1d( fid, mask, num_iter=500, factor=0.95, maxPeaks=20, peak_separation = 100, tolerance=1e-3 ):
    
    sss = numpy.zeros( len(fid))
    sss_prev = numpy.zeros( len(fid))
    
    final_iter_value  = 0
    final_tol_value   = 0
    final_numpeaks_value = 0
    
    numberOfpeaks = []
    sig_difference = [0,]
    fid1 = fid.copy()
    
    k=0
    rrr = fft(fid1)
    
    
    noise_not_reached = True

        
    while  noise_not_reached and numPeaks( sss ) <maxPeaks:
        
        sss_prev =  numpy.copy(sss)
        
        rrr = fft(fid1)
        m1 = max(rrr.real)*factor
               
        for i,r in enumerate(rrr):
            if r.real > m1:
                sss[i] = sss[i]+rrr[i].real-m1
                rrr[i] = complex(m1)
        sig_difference.append( float(sss.sum()))
        rrr = rrr.real + 1j * hilbert( rrr.real )
        
        fid1 = ifft(rrr)*mask

        
        numberOfpeaks.append( numPeaks( sss ))

        
        npeaks = numpy.array(numberOfpeaks)
    
#        noise_not_reached = True
        if k>=5:
            ddd = abs(npeaks[1:]-npeaks[:-1])
            ppp = (numpy.where(ddd>0))[0]
            ppp1 = ppp[1:]-ppp[:-1]
            for j,v in enumerate(ppp1):


                if v < peak_separation and npeaks[-1]>maxPeaks:
                    noise_not_reached = False 
                    print "reached noise level",v,npeaks[-1],k
                    break

        k += 1
        if k >  num_iter:
            print "reached max iterations"
            noise_not_reached = False
            
        if k>2:
            
#            tol = (float(sig_difference[-1])-float(sig_difference[-2]))/float(sig_difference[-1])
            
            tol = (float(sig_difference[-1])-float(sig_difference[-2]))/float(sig_difference[-1])
            
            if tol < tolerance:
                
                noise_not_reached = False
            
        
    final_iter_value = k

#    print k,
                    
    return( sss_prev, [final_iter_value, numPeaks( sss ), tol ] )
Example #54
degree_sign= u'\N{DEGREE SIGN}'
datadir='/lustre/janus_scratch/life9360/ses3d_working_dir_2016/OUTPUT'
stafile='/lustre/janus_scratch/life9360/ses3d_working_dir_2016/INPUT/recfile_1'
dbase=symdata.ses3dASDF('/lustre/janus_scratch/life9360/ASDF_data/ses3d_2016_10sec_3comp.h5')

SLst=stations.StaLst()
SLst.read(stafile)
evlo=129.0
evla=41.306
st = dbase.get_stream(staid='SES.98S45', SLst=SLst)

# stla, elve, stlo = dbase.waveforms['SES.98S45'].coordinates.values()
# print stlo, stla
# st[0].data=hilbert(st[0].data)
# st[1].data=hilbert(st[1].data)
st[2].data=hilbert(st[2].data)*-1.

stime=st[0].stats.starttime+643
etime=st[0].stats.starttime+720
rel= obspy.signal.polarization.polarization_analysis(stream=st, win_len=10., win_frac=0.1, frqlow=0.095, frqhigh=0.105, stime=stime, etime=etime, verbose=True,
            method='pm', var_noise=0.0)
# plt.plot(rel['timestamp'], rel['incidence'], 'bo')
# fig, ax=plt.subplots()
# plt.errorbar(rel['timestamp'], rel['incidence'], yerr=np.degrees(rel['incidence_error']))
# plt.subplots()
# ax.fill_betweenx(np.array([0, 80, 650, 670, facecolor='red', alpha=0.5)
ax.errorbar(rel['timestamp'], rel['azimuth'], yerr=np.degrees(rel['azimuth_error']), fmt='r')
# ax.plot(rel['timestamp'], rel['azimuth'], fmt='r')
# ax.fill_betweenx(np.array([0, 80]), 650, 670, facecolor='red', alpha=0.5)
# plt.show()
####################
ax2.plot(Tmed, np.ones(Tmed.size)*231, 'g-', lw=5)
plt.xlim(550, 750)
ax2.set_ylabel('Propagation Angle ('+degree_sign+')', color='b', fontsize=20)
ax2.set_yticks(np.arange(225, 260, 5))
ax2.set_ylim(225, 255)
# # ax2.set_yticks(np.arange(45, 71, 6))
# # ax2.set_ylim(225, 250)
# ax2.tick_params(labelsize=20)
# ax1.set_xlabel('Time (sec)', fontsize=20)
# for tl in ax2.get_yticklabels():
#     tl.set_color('b')
plt.show()
# 
trZ=st3.select(channel='*Z')[0]
trR=st3.select(channel='*R')[0]
ratio=hilbert(trR.data).max()/trZ.data.max()
plt.plot(hilbert(trR.data)/ratio)
plt.plot(trZ.data)
plt.show()
# 
# dbase.readtxt(datadir=datadir, stafile=stafile, channel='all', verbose=True, VminPadding=2.7)
# dbase.readtxt(datadir=datadir, stafile=stafile, channel='all', verbose=True, VminPadding=2.0, factor=10)

# evlo=129.0
# evla=41.306
# try:
#     del dbase.events
# except:
#     pass
# dbase.AddEvent(evlo, evla, evdp=1.0)
# -*- coding: utf-8 -*-
from scipy import signal
from scipy import fftpack
import numpy as np
import pylab as pl

# coefficients of an equalizer filter
a = np.array([1.0, -1.947463016918843, 0.9555873701383931])
b = np.array([0.9833716591860479, -1.947463016918843, 0.9722157109523452])

# frequency sweep sampled at 44.1 kHz
t = np.arange(0, 0.5, 1/44100.0)
x= signal.chirp(t, f0=10, t1 = 0.5, f1=1000.0)

# compute the filter output directly in one pass
y = signal.lfilter(b, a, x)

hy = fftpack.hilbert(y)
pl.plot( np.sqrt(y**2 + hy**2),"r", linewidth=2) 
pl.plot(y)
pl.show()
Example #57
def analytic_signal(x):
    """ A short-cut assuming that the incoming signal is reasonable
    e.g. fairly pure sinusoid.
    """ 
    x = x - np.mean(x)
    return(x+1j*hilbert(x))
Example #58
def getPhase(x):     
    return numpy.arctan2(x,hilbert(x))
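A quick hedged check of getPhase above: for a pure tone, arctan2(x, hilbert(x)) is the instantaneous phase wrapped to (-pi, pi]; after unwrapping, its slope matches the tone's angular frequency per sample (sign as per fftpack's convention).

import numpy
from scipy.fftpack import hilbert

x = numpy.sin(numpy.arange(1024) * 2 * numpy.pi * 8 / 1024)
ph = numpy.unwrap(getPhase(x))
print((ph[-1] - ph[0]) / (len(ph) - 1))   # ~ 2*pi*8/1024 ~ 0.049 rad per sample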
Example #59
# filterS1=_aftan_gaussian_filter(alpha=alpha, omega0=2*np.pi/T0, ns=ns, indata=sp1, omsArr=omsArr)
# filterS2=_aftan_gaussian_filter(alpha=alpha, omega0=2*np.pi/T0, ns=ns, indata=sp2, omsArr=omsArr)
# 
# st[0].data=np.fft.ifft(filterS0, ns)
# st[1].data=np.fft.ifft(filterS1, ns)
# st[2].data=np.fft.ifft(filterS2, ns)


st.plot(type='relative')
st.decimate(10)
stla, elve, stlo = dbase.waveforms['SES.98S47'].coordinates.values()
dt=st[0].stats.delta
dist, az, baz_ev=obspy.geodetics.gps2dist_azimuth( stla, stlo, evla, evlo)
dist=dist/1000.

vmin = 2.; vmax = 5.
tmin=dist/vmax; tmax=dist/vmin; twin=1; tlength=20

tmin=640; tmax=700
n0 = int(tmin/dt); nt = int(tmax/dt)
trE=st.select(channel='*E')[0]; trN=st.select(channel='*N')[0]; trZ=st.select(channel='*Z')[0]
dataE = trE.data[n0:nt]; dataN = trN.data[n0:nt]; dataZ = trZ.data[n0:nt]
dataE = trE.data; dataN = trN.data; dataZ = trZ.data

plt.plot(hilbert(dataZ)*-1)
plt.plot(dataE)
# plt.scatter(dataE, dataN, c=n0+np.arange(nt-n0), cmap='jet')
# plt.subplots()
# plt.scatter( n0+np.arange(nt-n0), dataZ, c=n0+np.arange(nt-n0), cmap='jet')

plt.show()
Example #60
 def time_hilbert(self, size, soltype):
     if soltype == 'fft':
         hilbert(self.f)
     else:
         direct_hilbert(self.f)