Example #1
import numpy as np
from numpy import fft
from scipy import signal

def calc_cospectrum(a, b, **kwargs):
    nfft_time = kwargs.get('nfft_time', np.shape(a)[0])
    nlon = np.shape(a)[1]
    fa = fft.fft(a, axis=1)
    fb = fft.fft(b, axis=1)
    nomega = nfft_time//2 + 1
    nk = nlon//2 + 1
    cfa = np.real(fa[:,:nk])
    sfa = np.imag(fa[:,:nk])
    cfb = np.real(fb[:,:nk])
    sfb = np.imag(fb[:,:nk])
    pp = np.zeros([nomega,nk])
    pn = np.zeros([nomega,nk])
    for i in range(nk):
        omega, pcacb = signal.csd(cfa[:,i],cfb[:,i],nperseg=nfft_time)
        omega, psasb = signal.csd(sfa[:,i],sfb[:,i],nperseg=nfft_time)
        omega, pcasb = signal.csd(cfa[:,i],sfb[:,i],nperseg=nfft_time)
        omega, psacb = signal.csd(sfa[:,i],cfb[:,i],nperseg=nfft_time)
        pp[:,i] = np.real(pcacb)+np.real(psasb)+np.imag(pcasb)-np.imag(psacb)
        pn[:,i] = np.real(pcacb)+np.real(psasb)-np.imag(pcasb)+np.imag(psacb)
    p_all = np.zeros([nomega*2,nk])
    p_all[:nomega,:] = np.flipud(pn)
    p_all[nomega:,:] = pp
    sigma = 0.25/np.pi*nomega
    x = np.linspace(-nomega/2,nomega/2,nomega)
    gauss = np.exp(-x**2/(2*sigma**2))
    gauss = gauss/np.sum(gauss)
    for i in range(nk):
        p_all[:,i] = np.convolve(p_all[:,i],gauss,mode='same')
    omega_all = np.concatenate((np.flipud(-omega),omega))
    return p_all,omega_all
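A quick smoke test for the routine above (a sketch; the random fields and sizes are made up, and numpy/scipy are assumed to be imported as in the snippet):

import numpy as np

a = np.random.randn(128, 64)   # hypothetical (time, longitude) field
b = np.random.randn(128, 64)
p_all, omega_all = calc_cospectrum(a, b, nfft_time=128)
print(p_all.shape)      # (130, 33): 2*nomega frequency rows by nk zonal wavenumbers
print(omega_all.shape)  # (130,): negative frequencies followed by positive ones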
Example #2
def b(v, F, I, idx):
    x_i = np.dot(v, F[idx][:, 0])
    y_i = np.dot(v, F[idx][:, 1])
    z_i = np.dot(v, F[idx][:, 2])
    vec = [x_i, y_i, z_i]

    return ifft(fft(I[idx] * fft(l(vec))))
Example #3
def fwgn_model(fm,fs,N):
    N = int(N)
    Nfft = 2**max(3,nextpow2(2*fm/fs*N))
    Nifft = math.ceil(Nfft*fs/(2*fm))

    doppler_coeff = np.sqrt(doppler_filter(fm,Nfft))

    CGI, CGQ = fft(randn(Nfft)), fft(randn(Nfft))
    f_CGI = CGI * doppler_coeff
    f_CGQ = CGQ * doppler_coeff

    del CGI, CGQ, doppler_coeff
    gc.collect()

    tzeros = np.zeros(abs(Nifft-Nfft))
    filtered_CGI = np.hstack((f_CGI[:Nfft//2], tzeros, f_CGI[Nfft//2:]))
    filtered_CGQ = np.hstack((f_CGQ[:Nfft//2], tzeros, f_CGQ[Nfft//2:]))

    del tzeros, f_CGI, f_CGQ
    gc.collect()

    hI, hQ = ifft(filtered_CGI), ifft(filtered_CGQ)

    del filtered_CGI, filtered_CGQ
    gc.collect()

    rayEnvelope = np.abs(np.abs(hI) + 1j * hQ)
    rayRMS = math.sqrt(np.mean(rayEnvelope[:N]**2))

    # h_{I}(t) - jh_{Q}(t)
    # Here we have the phase shift of pi/2 when multiplying the imaginary
    # portion by -1j
    h = (np.real(hI[:N]) - 1j * np.real(hQ[:N]))/rayRMS

    return h
Example #4
    def magabs_cs():
        #b.line_plot("magabs_cs", a.frequency, a.MagAbs[:, 0])
        #b.line_plot("magabs_cs", a.frequency, a.MagAbs[:, 257])

        if 0:
            myifft=fft.ifft(a.Magcom[:,500])
            myifft[50:-50]=0.0
            #myifft[:20]=0.0
            #myifft[-20:]=0.0
            bg=fft.fft(myifft)
            filt=[]
            for n in range(len(a.yoko)):
                myifft=fft.ifft(a.Magcom[:,n])
                #b.line_plot("ifft", absolute(myifft))
                myifft[50:-50]=0.0
                #myifft[:20]=0.0
                #myifft[-20:]=0.0
                filt.append(absolute(fft.fft(myifft)-bg))
            b.colormesh("filt", a.frequency, a.yoko, filt)
        if 1:
            myifft=fft.ifft(mag[:,500])
            myifft[40:-40]=0.0
            myifft[:20]=0.0
            myifft[-20:]=0.0
            bg=fft.fft(myifft)
            filt=[]
            for n in range(len(yok)):
                myifft=fft.ifft(mag[:,n])
                #b.line_plot("ifft", absolute(myifft))
                myifft[50:-50]=0.0
                #myifft[:20]=0.0
                #myifft[-20:]=0.0
                filt.append(absolute(fft.fft(myifft)))
            b.colormesh("filt", frq, yok, filt)
Example #5
    def potential(self):
        density = self.rho_den()
        soft_pot = self.softened_pot()
        ft1 = fft(density)
        ft2 = fft(soft_pot)
        conv_pot = ifft(ft1*ft2)
        return conv_pot
Example #6
    def lms(self):
        global wf_ech, wf_ref
        M = int(2 ** np.ceil(np.log2(self.M)))
        u = util.enframe(np.append(wf_ref, np.zeros(M)), 2 * M, M)
        d = util.enframe(wf_ech, M, 0)
        uf = nf.fft(u, 2 * M, 1)
        W = nf.fft(self.w, 2 * M)

        wf_ech = np.array([], np.complex128)

        for ii in range(0, u.shape[0]):
            mu_bar = (110 * np.exp(-(ii) / self.estep) + 1) * self.mu
            yfi = W * uf[ii, :]  # 1x2M
            yi = nf.ifft(yfi, 2 * M)  # 1x2M
            yi = yi[-M:]  # 1xM

            ei = d[ii, :] - yi  # 1xM
            efi = nf.fft(np.append(np.zeros(M), ei), 2 * M)  # 1x2M

            uefi = nf.ifft(np.conj(uf[ii, :]) * efi, 2 * M)
            uefi[-M:] = 0
            uei = nf.fft(uefi, 2 * M)

            W = W + 2 * mu_bar * uei
            wf_ech = np.append(wf_ech, ei)

        self.w = nf.ifft(W, 2 * M)
Example #7
from numpy import zeros, empty, complex64, complex128, append, conj
from numpy.fft import fft, fftshift

def compacf(acfall,noiseall,Nr,dns,bstride,ti,tInd):
    bstride=bstride.squeeze()
    assert bstride.size==1 #TODO

    Nlag = acfall.shape[2]
    acf  =      zeros((Nr,Nlag),complex64) #NOT empty, note complex64 is single complex float
    spec =      empty((Nr,2*Nlag-1),complex128)
    try:
        acf_noise = zeros((noiseall.shape[3],Nlag),complex64)
        spec_noise= zeros(2*Nlag-1,complex128)
    except AttributeError:
        acf_noise = None
        spec_noise= 0.

    for i in range(tInd[ti]-1,tInd[ti]+1):
        acf += (acfall[i,bstride,:,:,0] + 1j*acfall[i,bstride,:,:,1]).T
        if acf_noise is not None: #must be is not None
            acf_noise += (noiseall[i,bstride,:,:,0] + 1j*noiseall[i,bstride,:,:,1]).T

    acf = acf/dns/(i-(tInd[ti]-1)+1) #NOT /=
    if acf_noise is not None: #must be is not None
        acf_noise = acf_noise/dns / (i-(tInd[ti]-1)+1)
#%% spectrum noise
    if acf_noise is not None:
        for i in range(Nlag):
            spec_noise += fftshift(fft(append(conj(acf_noise[i,1:][::-1]),acf_noise[i,:])))


        spec_noise = spec_noise / Nlag
#%% spectrum from ACF
    for i in range(Nr):
        spec[i,:] = fftshift(fft(append(conj(acf[i,1:][::-1]), acf[i,:])))-spec_noise


    return spec,acf
Example #8
File: fft.py Project: darvilp/comphys1
from cmath import exp, pi

def fft(x):
    N = len(x)
    if N <= 1: return x
    even = fft(x[0::2])
    odd  = fft(x[1::2])
    return [even[k] + exp(-2j*pi*k/N)*odd[k] for k in range(N//2)] + \
           [even[k] - exp(-2j*pi*k/N)*odd[k] for k in range(N//2)]
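A quick check of this radix-2 recursion against numpy (a sketch; it assumes the input length is a power of two, which the algorithm requires):

import numpy as np

x = np.random.randn(16).tolist()
print(np.allclose(fft(x), np.fft.fft(x)))  # True for power-of-two lengths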
Example #9
def MaxInnerProd(ser1, ser2, PSD):
  size = Numeric.shape(ser1)[0]
  pdlen = size//2
  nyquistf = 0.5/15.0   #   !!! hardcoded !!!!
  freqs = Numeric.arange(0,pdlen+1,dtype='d') * (nyquistf / pdlen)
  if Numeric.shape(ser2)[0] != size:
     print("size of time series must be the same")
     sys.exit(1)
  if Numeric.shape(PSD)[0] != pdlen:
     print("wrong size of psd: ", pdlen, Numeric.shape(PSD))
     sys.exit(1)
  fourier1 = FFT.fft(ser1)
  fourier2 = FFT.fft(ser2)
  prod1 = Numeric.zeros(pdlen+1, dtype='d')
  prod2 = Numeric.zeros(pdlen+1, dtype='d')
  prod1[0] = 0.0
  prod1[1:pdlen] = numpy.multiply(fourier1[1:pdlen],numpy.conjugate(fourier2[1:pdlen])) + numpy.multiply(fourier1[-1:pdlen:-1],numpy.conjugate(fourier2[-1:pdlen:-1]))
  prod1[pdlen] = fourier1[pdlen]*fourier2[pdlen]
  prod2[0] = 0.0
  prod2[1:pdlen] = numpy.multiply(fourier1[1:pdlen],numpy.conjugate(fourier2[1:pdlen]*1.j)) + numpy.multiply((fourier1[-1:pdlen:-1]),numpy.conjugate(fourier2[-1:pdlen:-1]*(-1.j)))
  prod2[pdlen] = fourier1[pdlen]*fourier2[pdlen]
  Numeric.divide(prod1[1:], PSD, prod1[1:])
  Numeric.divide(prod2[1:], PSD, prod2[1:])
  olap0 = 0.0
  olappiby2 = 0.0
  for i in range(pdlen):
      if freqs[i] > fLow and freqs[i] <= fHigh:
           olap0 += prod1[i]
           olappiby2 += prod2[i]
  olap0 = 2.0*olap0/float(size)
  olappiby2 = 2.0*olappiby2/float(size)
#  olap0 =  2.0*(numpy.sum(prod1[1:]))/float(size) #it must be scaled by dt
#  olappiby2 =  2.0*(numpy.sum(prod2[1:]))/float(size) #it must be scaled by dt
  print("angle of maxim. = ", math.atan(olappiby2/olap0))
  return sqrt(olap0**2 + olappiby2**2)
Example #10
def filt(frq,tol,data,spd):
    ''' this function filters signals given a filtering frequency (frq), tolerance (tol), data, and sampling frequency (spd) '''
    #define frequency tolerance range
    lowcut = (frq-frq*tol)
    highcut = (frq+frq*tol)
    #conduct fft
    ffta = fft.fft(data)
    bp2 = ffta.copy()   #copy, not a view, so the original spectrum is preserved
    fftb = fft.fftfreq(len(bp2))
    #make every amplitude value 0 that is not in the tolerance range of frequency of interest
    #24 adjusts the frequency to cpd
    for i in range(len(fftb)):
        #spd is samples per day (if hourly = 24)
        if (fftb[i]*spd)>highcut or (fftb[i]*spd)<lowcut:
            bp2[i]=0
    #conduct inverse fft to transpose the filtered frequencies back into time series
    crve = fft.ifft(bp2)    #complex number returned
    #convert back to frequency domain
    fta = fft.fft(crve)
    rl = fta.real
    im = fta.imag
    mag = [math.sqrt(rl[i]**2 + im[i]**2) for i in range(len(rl))] # magnitude
    phase = [math.atan2(im[i],rl[i]) for i in range(len(rl))]       # phase
    yfilt = crve.real       #real component of complex number
    zfilt = crve.imag       #imaginary component of complex number
    return yfilt, zfilt, crve, mag, phase
Example #11
File: gpr.py Project: kmunve/processgpr
    def applyFFT(self, wdir=os.getcwd(), envelope=True):
        '''
        FFT transforms the frequency-domain (FD) data to time-domain (TD) data
        '''
        self.info("Applying FFT ...")
        workfile = os.path.join(wdir, "tmp" + os.path.basename(self.filename) + ".dat")
        fid = open(workfile, "wb")
        if envelope:
            for trace in range(self.traces):
                np.abs(fft.fft(self.data[trace])).tofile(fid)  # envelope
        else:
            for trace in range(self.traces):
                np.real(fft.fft(self.data[trace])).tofile(fid)  # incl. phase

        fid.close()
        fid = open(workfile, "rb")
        try:  # Zero-padding applied
            self.data = np.zeros((self.numfreqpadded, self.traces), dtype=np.float32)
            for i in range(self.traces):
                fid.seek(self.numfreqpadded * i * 8)
                self.data[:, i] = np.fromfile(fid, "<f8", self.numfreqpadded)
        except AttributeError:  # Zero-padding not applied
            self.data = np.zeros((self.numfreq, self.traces), dtype=np.float32)
            for i in range(self.traces):
                fid.seek(self.numfreq * i * 8)
                self.data[:, i] = np.fromfile(fid, "<f8", self.numfreq)

        fid.close()
        self.samples = self.data.shape[0]
        self.traces = self.data.shape[1]
        self.t = np.arange(0, self.deltaT * self.samples, self.deltaT) * 1e9  #: time axis in ns
        self.r = self.t * self.cmed / 2.0  #: range axis in m
        self.done()
Example #12
def deconvolve(signal, HRFs, SNRs):
	"""Deconvolve signal using Wiener deconvolution."""
	H = fft(HRFs, len(signal), axis=0)
	wiener_filter = np.conj(H) / (H * np.conj(H) + 1 / SNRs**2)
	deconvolved = np.real(ifft(wiener_filter * fft(signal, axis=0), axis=0))

	return deconvolved
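A minimal usage sketch, assuming fft/ifft come from numpy.fft as in the snippet; the exponential kernel and the SNR of 10 are made-up illustration values:

import numpy as np
from numpy.fft import fft, ifft

N = 256
hrf = np.exp(-np.arange(32) / 4.0)       # hypothetical response kernel
hrf /= hrf.sum()
x = np.zeros(N)
x[[40, 120, 200]] = 1.0                  # hidden impulses
y = np.real(ifft(fft(hrf, N) * fft(x)))  # circular convolution of x with the kernel
rec = deconvolve(y, hrf, 10.0)           # rec peaks near samples 40, 120 and 200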
Example #13
    def _spddirect2(self, n):
        '''this looks bad, maybe with an fftshift
        '''
        #size = s1+s2-1
        hw = (fft.fft(np.r_[self.ma[::-1], self.ma], n)
                / fft.fft(np.r_[self.ar[::-1], self.ar], n))
        return (hw*hw.conj())  #.real[n//2-1:]
Example #14
def autocorr_fft(signal, axis = -1):
    """Return full autocorrelation along specified axis. Use fft
    for computation."""
    if N.ndim(signal) == 0:
        return signal
    elif signal.ndim == 1:
        n       = signal.shape[0]
        nfft    = int(2 ** nextpow2(2 * n - 1))
        lag     = n - 1
        a       = fft(signal, n = nfft, axis = -1)
        au      = ifft(a * N.conj(a), n = nfft, axis = -1)
        return N.require(N.concatenate((au[-lag:], au[:lag+1])), dtype = signal.dtype)
    elif signal.ndim == 2:
        n       = signal.shape[axis]
        lag     = n - 1
        nfft    = int(2 ** nextpow2(2 * n - 1))
        a       = fft(signal, n = nfft, axis = axis)
        au      = ifft(a * N.conj(a), n = nfft, axis = axis)
        if axis == 0:
            return N.require(N.concatenate( (au[-lag:], au[:lag+1]), axis = axis), \
                    dtype = signal.dtype)
        else:
            return N.require(N.concatenate( (au[:, -lag:], au[:, :lag+1]), 
                        axis = axis), dtype = signal.dtype)
    else:
        raise RuntimeError("rank >2 not supported yet")
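A verification sketch against numpy's direct correlation. The snippet assumes the module-level names N (numpy), fft/ifft, and a nextpow2 helper, so plausible stand-ins are supplied here; numpy may emit a ComplexWarning when the result is cast back to the input dtype:

import numpy as N
from numpy.fft import fft, ifft

def nextpow2(n):
    # stand-in for the helper the snippet assumes
    return int(N.ceil(N.log2(n)))

x = N.random.randn(50)
ac = autocorr_fft(x)
ref = N.correlate(x, x, mode='full')  # direct O(n^2) reference
print(N.allclose(ac, ref))            # True up to float rounding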
Example #15
def ild_bare(hrir, cf, **kwdargs):
    '''
    ILD computation routine. called by ild that handles multiprocessing,...
    '''
    samplerate = hrir.samplerate

    # perform some checks and special cases
    if (hrir[:,0] == hrir[:,1]).all():
        return np.zeros(len(cf))

    if (abs(hrir[:,0]) <= 10e-6).all() or (abs(hrir[:,1]) <= 10e-6).all():
        log_debug('Blank hrirs detected, output will be weird')

    if not isinstance(hrir, Sound):
        hrir = Sound(hrir, samplerate = samplerate)

    fb = Gammatone(Repeat(hrir, len(cf)), np.hstack((cf, cf)))
    filtered_hrirset = fb.process()
    
    ilds = []
    for i in range(len(cf)):
        left = filtered_hrirset[:, i]
        right = filtered_hrirset[:, i+len(cf)]
        # This FFT stuff does a correlate(left, right, 'full')
        Lf = fft(np.hstack((left, np.zeros(len(left)))))
        Rf = fft(np.hstack((right[::-1], np.zeros(len(right)))))
        C = ifft(Lf*Rf).real
        ilds.append(np.sqrt(np.amax(C)/sum(right**2)))
    ilds = np.array(ilds)
    return ilds
Example #16
File: convolve.py Project: muhl/PhaC
def convolve(A, B):
    N = A.shape[0]
    fA = fft.fft(A)
    fB = fft.fft(B)
    one = np.ones(N)
    one[1::2] = -1
    return np.real(fft.ifft(fA*fB)/float(N))
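Note that the alternating-sign vector one is computed but never applied. The return value is the circular convolution scaled by 1/N, which this sketch checks against a plain FFT reference (assuming np and fft as in the snippet):

import numpy as np
from numpy import fft

A = np.random.randn(8)
B = np.random.randn(8)
ref = np.real(fft.ifft(fft.fft(A) * fft.fft(B)))  # unscaled circular convolution
print(np.allclose(convolve(A, B), ref / len(A)))  # True: note the extra 1/N factor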
Example #17
    def InvPotentialVorticity(self, field, length=None):

        if length is None:
            length = 2*pi

        N = shape(field)[0]

        k = array(concatenate((arange(0, N//2), arange(-N//2, 0))), dtype=complex128)
        k *= (2*pi)/length

        [KX, KY] = meshgrid(k, k)

        """ We are trying to solve d_yy(eta) + d_xx(eta) - eta = p
        Therefore, in Fourier space, it will become
        (-(kx^2 + ky^2) - 1)etaHat = pHat
        """
        delsq = -(KX*KX + KY*KY) - 1
        delsq[0,0] = 1

        tmp = fft(field, axis=0)
        tmp = fft(tmp, axis=1)

        tmp = tmp/delsq

        tmp = ifft(tmp, axis=1)
        tmp = ifft(tmp, axis=0)

        return tmp.real
Example #18
File: Xcorr.py Project: mmoussallam/PyMP
def XcorrNormed(x,y):
    """ method useful to compare binary vectors """
    if len(x) != len(y):
        raise ValueError("signals should be the same size")

    X = fft(x)
    Y = fft(y)

    # Compute classic cross-correlation
    classic_xcorr = (ifft(X*(Y.conj()))).real
    maxlag = len(X)//2
    classic_xcorr = concatenate((classic_xcorr[-maxlag:], classic_xcorr[0:maxlag]))
    ind = abs(classic_xcorr).argmax()
#    ind = classic_xcorr.argmax()
    val = classic_xcorr[ind]

    # normalize
    normx = math.sqrt(sum((x)**2))
    normy = math.sqrt(sum((y)**2))
#    print 'Norm of ' , normx*normy , ' for a value found of ' , val
#    val = float(val)/(float(len(nonzero(x)[0]) + len(nonzero(y)[0]))/2)
    if (normx * normy) != 0:
        val = float(val)/(normx * normy)

    return classic_xcorr, (len(X)//2 - ind), val
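A deterministic sketch with two unit impulses, assuming the snippet's bare names come from numpy (fft, ifft, concatenate) and math; a 4-sample delay should come back as a shift of 4 with a unit correlation value:

import math
import numpy as np
from numpy import concatenate
from numpy.fft import fft, ifft

x = np.zeros(64); x[10] = 1.0
y = np.zeros(64); y[14] = 1.0   # x delayed by 4 samples
xcorr, shift, val = XcorrNormed(x, y)
print(shift, round(val, 6))     # 4 1.0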
Example #19
def myshift(x,n=0):
    '''myshift(x,n): shift an array x by an amount n using FFTs'''
    vec=0*x  #make a vector of zeros the same length as the input vector
    vec[n]=1
    vecft=fft(vec)
    xft=fft(x)
    return numpy.real(ifft(xft*vecft))
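Since the delta-vector trick implements a circular shift, the result should agree with np.roll (a sketch assuming fft/ifft from numpy.fft, as the snippet uses bare names):

import numpy
from numpy.fft import fft, ifft

x = numpy.arange(8.0)
print(numpy.allclose(myshift(x, 3), numpy.roll(x, 3)))  # True: FFT shift == circular roll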
Example #20
File: hrr.py Project: MatthewAKelly/HDM
    def __rmul__(self, other):
        if isinstance(other, HRR):
            x = ifft(fft(self.v)*fft(other.v)).real
            x = x/norm(x)
            return HRR(data=x)
        else:
            return HRR(data=self.v*other)
Example #21
def noiseproduct(signal1,signal2,noise,stime,npatches):
    """Compute the noise inner product for signal1 and signal2, where
    the two signals are sampled at intervals stime, and the product is computed
    for the total duration represented in the array, against noise
    represented by the time series noise; npatches overlapping periods are used
    to estimate the PSD of the noise."""
    
    # compute signal FFT without windowing or averaging
    # this definition of the FFT satisfies Parseval's theorem, with
    # sum(signal**2) * stime == sum(abs(sfour)**2) / (stime*length(signal))
    # [since deltaf = 1 / (totaltime)]

    sfour1 = stime * FFT.fft(signal1)
    sfour2 = stime * FFT.fft(signal2)
    
    # compute the noise spectrum, using segment averaging,
    # and interpolate the noise to be defined on the same frequencies
    # of the signal's spectrum

    nspec = spect(noise,stime,npatches)

    siglen = len(signal1)
    deltaf = 1.0 / (stime * siglen)

    fourlen = siglen//2 + 1
    ispec = Numeric.zeros([fourlen,2],dtype='d')

    ispec[:,0] = deltaf * Numeric.arange(0,fourlen)
    # ispec[:,1] = arrayfns.interp(nspec[:,1],nspec[:,0],ispec[:,0])
    ispec[:,1] = linearinterpolate(nspec[:,1],nspec[:,0],ispec[:,0])
   
    return 4.0 * real(sum(sfour1[1:fourlen] * conjugate(sfour2[1:fourlen]) / ispec[1:,1])) * deltaf
Example #22
def decodefft(finfo,data, chans=None):
    if chans is None: chans = finfo.num_chan
    from numpy.fft import fft,ifft
    #output: decoded data with the number of heights reduced
    #two variables are added to the finfo class:
    #deco_num_hei, deco_hrange
    #data must be arranged either: 
    #    (channels,heights,profiles) (C-style, profs change faster)
    #or: (profiles,heights,channels) (C-style, profs change faster)
    #fft along the entire(n=None) acquired heights(axis=1), stores in data
    data=fft(data,n=None,axis=1) 
    NSA=finfo.num_hei
    num_codes=finfo.subcode.shape[0]
    num_bauds=finfo.subcode.shape[1]
    fft_code=fft(finfo.subcode,finfo.num_hei,1).conj()
    #if the order is (channels,heights,profiles)
    if data.shape[0]==chans:  
        for code_ind in range(num_codes):
            for hei_ind in range(NSA):
                data[:,hei_ind,code_ind::num_codes]=\
                data[:,hei_ind,code_ind::num_codes]*fft_code[code_ind,hei_ind]
    else:  #if the order is (profiles,heights,channels)
        for code_ind in range(num_codes):
            for hei_ind in range(NSA):
                data[code_ind::num_codes,hei_ind,:]=\
                data[code_ind::num_codes,hei_ind,:]*fft_code[code_ind,hei_ind]
    data=ifft(data,None,1) #inverse fft along the heights
    return data[:,:-num_bauds+1,:]
Example #23
def STRW(x, om, OM, up):
    M = len(x)
    N = len(x[0])
   
    # 2d convolution:
    y = np.ndarray(shape=(M, N), dtype='complex')
    h = get_wavelet(om, OM, up, M, N)
    
    # temporal convolution:
    for m in range(M):
        X = fft(x[m])
        H = fft(h[m])
        Z = X*H

        z = ifft(Z)
        y[m] = z

    # spectral convolution
    x = y.transpose()
    h = h.transpose()
    y = y.transpose()
    for n in range(N):
        X = fft(x[n])
        H = fft(h[n])
        Z = X*H
        
        z = ifft(Z)
        y[n] = z

    y = y.transpose()

    return y
Example #24
            def tbstim():
                ysave = np.zeros(Nfft)
                xsave = np.zeros(Nfft)
                psave = np.zeros(Nfft)

                self.yfavg = np.zeros(Nfft)
                self.xfavg = np.zeros(Nfft)
                self.pfavg = np.zeros(Nfft)

                for ii in range(num_loops):
                    for jj in range(Nfft):
                        yield sigin.data_valid.posedge
                        xsave[jj] = float(sigin.data)/self.max
                        yield sigout.data_valid.posedge
                        ysave[jj] = float(sigout.data)/self.max
                        # grab the response from the floating-point model
                        psave[jj] = self.filter_model(float(xf))

                    # remove any zeros
                    xsave[xsave == 0] = 1e-19
                    ysave[ysave == 0] = 1e-19
                    psave[psave == 0] = 1e-19

                    # average the FFT frames (converges the noise variance)
                    self.yfavg += (np.abs(fft(ysave, Nfft)) / Nfft)
                    self.xfavg += (np.abs(fft(xsave, Nfft)) / Nfft)
                    self.pfavg += (np.abs(fft(psave, Nfft)) / Nfft)

                raise StopSimulation
Example #25
def ccor(ts1, ts2):
    '''
    Given two standardized time series, computes their cross-correlation using
    fast fourier transform. Assume that the two time series are of the same
    length.

    Parameters
    ----------
    ts1 : TimeSeries
        A standardized time series
    ts2 : TimeSeries
        Another standardized time series

    Returns
    -------
    The two time series' cross-correlation.
    '''
    # calculate fast fourier transform of the two time series
    fft_ts1 = nfft.fft(ts1.valuesseq)
    fft_ts2 = nfft.fft(ts2.valuesseq)

    # print(len(ts1))
    # print(len(ts2))
    # assert len(ts1) == len(ts2)

    # return cross-correlation, i.e. the convolution of the first fft
    # and the conjugate of the second
    return ((1 / (1. * len(ts1))) *
            nfft.ifft(fft_ts1 * np.conjugate(fft_ts2)).real)
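A usage sketch with a hypothetical stand-in for the TimeSeries class (only the valuesseq attribute and len() are needed here); the peak of the cross-correlation recovers the circular shift between the two series:

import numpy as np
from numpy import fft as nfft

class TS:  # hypothetical minimal stand-in for TimeSeries
    def __init__(self, v):
        self.valuesseq = np.asarray(v, dtype=float)
    def __len__(self):
        return len(self.valuesseq)

a = np.random.randn(64)
a = (a - a.mean()) / a.std()   # standardized, as the docstring assumes
t1, t2 = TS(a), TS(np.roll(a, -5))
cc = ccor(t1, t2)
print(np.argmax(cc))           # 5: the circular lag; the peak value is close to 1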
Example #26
File: fft256.py Project: AltmanEA/DSP
def fft16_a4_2(x):
    global mul_count
    tmp = x
    w3, w2, w1 = get_w_fft16_r2()
    mul_count += flops4Muls(w1)
    mul_count += flops4Muls(w2)
    mul_count += flops4Muls(w3)

    for j in range(8):
        tmp[j:j+9:8] = fft(tmp[j:j+9:8])
    tmp *= w1

    for i in range(2):
        for j in range(4):
            tmp[i*8+j:i*8+j+5:4] = fft(tmp[i*8+j:i*8+j+5:4])
    tmp *= w2

    for i in range(4):
        for j in range(2):
            tmp[i*4+j:i*4+j+3:2] = fft(tmp[i*4+j:i*4+j+3:2])
    tmp *= w3

    for i in range(8):
        tmp[i*2:i*2+2] = fft(tmp[i*2:i*2+2])

    return bit_revers(tmp)
Example #27
def InnerProd(ser1, ser2, PSD):
  size = Numeric.shape(ser1)[0]
  pdlen = size//2
  nyquistf = 0.5/15.0   #   !!! hardcoded !!!!
  freqs = Numeric.arange(0,pdlen+1,dtype='d') * (nyquistf / pdlen)
  if Numeric.shape(ser2)[0] != size:
     print("size of time series must be the same")
     sys.exit(1)
  if Numeric.shape(PSD)[0] != pdlen:
     print("wrong size of psd: ", pdlen, Numeric.shape(PSD))
     sys.exit(1)
  fourier1 = FFT.fft(ser1)
  fourier2 = FFT.fft(ser2)
  prod = Numeric.zeros(pdlen+1, dtype='d')
  prod[0] = 0.0
  prod[1:pdlen] = numpy.multiply(fourier1[1:pdlen],numpy.conjugate(fourier2[1:pdlen])) + numpy.multiply(fourier1[-1:pdlen:-1],numpy.conjugate(fourier2[-1:pdlen:-1]))
  prod[pdlen] = fourier1[pdlen]*fourier2[pdlen]
  Numeric.divide(prod[1:], PSD, prod[1:])
  olap0 = 0.0
  for i in range(pdlen):
      if freqs[i] > fLow and freqs[i] <= fHigh:
           olap0 += prod[i]
  olap0 = 2.0*olap0/float(size)
 # olap0 =  2.0*(numpy.sum(prod[1:]))/float(size) #it must be scaled by dt
  return olap0
Example #28
def analytic_signal(vi, windwidth, percover, win):
    nvi = len(vi)
    #h = zeros(vi.shape)
    bli, ble, num = fftsegs(windwidth, percover, nvi)
    for ii in range(len(bli)):
        v = vi[bli[ii]:ble[ii]+1]
        nv = len(v)
        if win == 1:
            fv = fft(v * numpy.hamming(nv))
        else:
            fv = fft(v)
        wind = zeros(v.size)
        # zero negative frequencies, double positive frequencies
        if nv % 2 == 0:
            wind[0] = 1  # keep DC
            wind[nv // 2] = 1
            wind[1:nv // 2] = 2  # double pos. freq

        else:
            wind[0] = 1
            wind[1:(nv + 1) // 2] = 2
        h = ifft(fv * wind)
    for i in range(len(h)):
        h[i] /= complex(num[i])
    return h
Example #29
def convFFT(x,y):
    nx = len(x)
    xf = np.pad(x, (nx//2, nx//2), mode='constant')
    yf = np.pad(y, (nx//2, nx//2), mode='constant')
    xf = fft.fft(fft.fftshift(xf))
    yf = fft.fft(fft.fftshift(yf))
    return fft.fftshift(np.real(fft.ifft(xf*yf)))[nx//2:3*nx//2]
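For two inputs of the same even length, this returns the centered slice of the linear convolution, which a direct reference confirms (a sketch assuming numpy is imported as in the snippet):

import numpy as np
from numpy import fft

x = np.random.randn(8)
y = np.random.randn(8)
nx = len(x)
ref = np.convolve(x, y, 'full')[nx//2:3*nx//2]  # centered nx samples of the linear convolution
print(np.allclose(convFFT(x, y), ref))          # True for equal-length, even-size inputs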
Example #30
def D2dWT(x, seeds, seedt, OM, om):
    '''
    Discrete two-dimensional Wavelet Transform
    -   x: input signal (one-dimensional)
    -   ws: spectral wavelet
    -   wt: temporal wavelet
    '''
    N = len(x)
    ws_t = seeds(np.arange(-100, 100)/100.0, OM)
    ws = np.zeros(200)
    '''
    ws[100:] = ws_t[:100]
    ws[:100] = ws_t[100:]
    '''
    ws = ws_t
    T = int(3.0 / om * 44100)
    wt = seedt(np.arange(T)/44100.0, om)
    WS = fft(ws)
    plt.plot(WS); plt.show()
    w = np.ndarray(shape=(T, 200), dtype='complex')
    for t in range(T):
        w[t] = WS * wt[t]
    y = np.zeros(N-T, dtype='complex')  # complex accumulator; the summed FFT products are complex
    for tau in range(N-T):
        y[tau] = sum(fft(x[tau:tau+200]) * w[tau])
    return y
Example #31
def main(argv):


  #####  Get Data, Bases, Weights, etc...  #####
  if FLAGS.setBases is not None:
    FLAGS.setBases = np.array([int(x) for x in FLAGS.setBases])
  
  temperatures  = np.arange(
      FLAGS.temp_low,
      FLAGS.temp_high + FLAGS.temp_delta/2,
      FLAGS.temp_delta)
  intensities   = np.arange(
      FLAGS.ints_low,
      FLAGS.ints_high + FLAGS.ints_delta/2,
      FLAGS.ints_delta)

  data, doms, sem, fit_lgInds, sn_map = get_data(FLAGS, plot_data=True)
  dom, plt_dom, _ = doms

  weights     = 1./sem**2
  data_weights = np.sqrt(np.sum(
    np.sum(gaussian_filter1d(data, 2.5, axis=-1)**2, axis=-1),
      axis=-1))
  # Ignore L0 projection for UED
  #if FLAGS.experiment == "UED":
  #  data_weights[0] = 0
  data_weights = np.expand_dims(np.expand_dims(data_weights, axis=-1), axis=0)

  fName_suffix = FLAGS.start_time + "_" + FLAGS.end_time
  folderName = os.path.join(
      FLAGS.basis_dir, FLAGS.experiment, FLAGS.basis_sub_dir,
      FLAGS.experiment + "_%8.6f_%8.6f_" % 
        (temperatures[0], intensities[0]) + fName_suffix)
  all_bases, norms, bases = get_bases(
      FLAGS, fit_lgInds, data.shape,
      folderName=folderName)
  
  fit_start_times = FLAGS.fit_start_time +\
      np.arange(FLAGS.Nfit_times)*\
          (FLAGS.fit_end_time-FLAGS.fit_start_time)/\
          max([(FLAGS.Nfit_times-1), 1])

  fit_times = np.ones((FLAGS.Nfit_times, data.shape[-1]))\
      *np.expand_dims(fit_start_times, -1)
  fit_times += np.arange(data.shape[-1])*FLAGS.fit_delta_time\
      + FLAGS.t_cut_low*FLAGS.fit_delta_time



  sim_times = float(FLAGS.start_time) +\
      np.arange(bases.shape[-1])*\
          (float(FLAGS.end_time)-float(FLAGS.start_time))/\
          max([(bases.shape[-1]-1), 1])


  ###################################################################
  #####  Evaluate Chi Squared Values Over Temp and Intensities  #####
  ###################################################################

  # Check if ChiSq is already saved
  if FLAGS.setBases is not None:
    suffix = "_Temp-{}-{}-{}_Ints-{}-{}-{}".format(
        temperatures[0], temperatures[-1], len(temperatures),
        intensities[0], int(100*intensities[-1])/100.0, len(intensities))
  else:
    suffix = "_Nbases-{}_Temp-{}-{}-{}_Ints-{}-{}-{}".format(
        bases.shape[0], temperatures[0], temperatures[-1], len(temperatures),
        intensities[0], int(100*intensities[-1])/100.0, len(intensities))
  if FLAGS.reg_scale > 0:
    ind = suffix.find("Temp")
    suffix = suffix[:ind] + "Reg-{}_".format(FLAGS.reg_scale) + suffix[ind:]
  
  output_prefix = os.path.join("output", FLAGS.experiment, FLAGS.basis_sub_dir)
  if not os.path.exists(output_prefix):
    os.makedirs(output_prefix)

  fit_results_fileName    = os.path.join(output_prefix,
      "{}_fit_results{}.h5".format(FLAGS.experiment, suffix))
  fit_output_fileName     = os.path.join(output_prefix,
      "{}_fit_output{}.h5".format(FLAGS.experiment, suffix))
  fit_landscape_fileName  = os.path.join(output_prefix,
      "{}_fit_landscape{}.h5".format(FLAGS.experiment, suffix))

  # If fit output/results file doesn't exist then fit
  if not os.path.exists(fit_results_fileName) and not FLAGS.debugging:
 
    pool = Pool(processes=70)
    basis_results = pool.map(partial(fit_data_pool,
        data=data,
        sem=sem,
        weights=weights,
        data_weights=data_weights,
        reg_scale=FLAGS.reg_scale,
        setBases=FLAGS.setBases,
        temperatures=temperatures,
        intensities=intensities,
        start_times=fit_times,
        sim_times=sim_times,
        time_delta=FLAGS.fit_delta_time,
        t_cut_low=FLAGS.t_cut_low,
        t_cut_high=FLAGS.t_cut_high,
        experiment=FLAGS.experiment,
        basis_dir=FLAGS.basis_dir,
        fName_suffix=fName_suffix,
        fit_lgInds=fit_lgInds),
        range(len(intensities)*len(temperatures)))

    # Parse fitting results
    temp_ind, ints_ind, start_time, chiSq_val, fit_coeffs = basis_results[0]
    chiSq             = np.ones((len(temperatures), len(intensities)))*np.nan
    best_start_times  = np.ones((len(temperatures), len(intensities)))*np.nan
    all_fit_coeffs      = np.ones((len(temperatures), len(intensities))\
        + fit_coeffs.shape)*np.nan
    best_temp, best_temp_ind = None, None
    best_ints, best_ints_ind = None, None
    best_fit_chiSq = np.inf
    best_start_time = None
    for res in basis_results:
      temp_ind, ints_ind, time_ind, chiSq_val, fit_coeffs = res
      chiSq[temp_ind, ints_ind] = chiSq_val
      best_start_times[temp_ind, ints_ind] = fit_start_times[time_ind]
      all_fit_coeffs[temp_ind, ints_ind] = fit_coeffs
      if chiSq_val < best_fit_chiSq:
        best_fit_chiSq = chiSq_val
        best_start_time = fit_start_times[time_ind]
        best_temp, best_temp_ind = temperatures[temp_ind], temp_ind
        best_ints, best_ints_ind = intensities[ints_ind], ints_ind


    ################################
    #####  Calculate Time STD  #####
    ################################

    time_std_temperatures = temperatures
    time_std_intensities  = intensities

    chiSq_time_std, time_std_range, time_std_temps, time_std_ints =\
        [], [], [], []


    std_times = np.arange(best_start_time - FLAGS.range_STD_time,
        best_start_time + FLAGS.range_STD_time + FLAGS.step_STD_time/2,
        FLAGS.step_STD_time)
    min_temp_ind = np.argmin(np.abs(temperatures[best_temp_ind]\
        - time_std_temperatures))
    min_ints_ind = np.argmin(np.abs(intensities[best_ints_ind]\
        - time_std_intensities))
    
    # Looking at only lg=2
    data_weights_std = np.zeros_like(data_weights)
    ind = np.where(fit_lgInds == 2)[0][0]
    data_weights_std[:,ind,:] = 1.
    
    pool = Pool(processes=70)
    time_results = pool.map(partial(time_std_pool,
        data=data,
        sem=sem,
        weights=weights,
        best_temp_ind=best_temp_ind,
        best_ints_ind=best_ints_ind,
        data_weights=data_weights_std,
        reg_scale=FLAGS.reg_scale,
        setBases=FLAGS.setBases,
        temperatures=time_std_temperatures,
        intensities=time_std_intensities,
        sim_times=sim_times,
        time_delta=FLAGS.fit_delta_time,
        t_cut_low=FLAGS.t_cut_low,
        t_cut_high=FLAGS.t_cut_high,
        experiment=FLAGS.experiment,
        basis_dir=FLAGS.basis_dir,
        fName_suffix=fName_suffix,
        fit_lgInds=fit_lgInds),
        std_times)

    # Parsing results
    for res in time_results:
      time, minChiSq, minInds = res
      time_std_range.append(time)
      chiSq_time_std.append(minChiSq)
      time_std_temps.append(time_std_temperatures[minInds[0]])
      time_std_ints.append(time_std_intensities[minInds[1]])
    time_std_range = np.array(time_std_range)
    chiSq_time_std = np.array(chiSq_time_std)
    time_std_temps = np.array(time_std_temps)
    time_std_ints = np.array(time_std_ints)
    

    # Fitting for t0 std
    def fitFxn(x, c, t0, off):
      return c*(x + t0)**2 + off

    mInd = np.argmin(chiSq_time_std)
    time_std_range_centered = np.array(time_std_range) - time_std_range[mInd]
    keep_inds = np.where((time_std_range_centered > -1*FLAGS.range_STD_time_cut) 
        & (time_std_range_centered < FLAGS.range_STD_time_cut))[0]
    chiSq_time_std = np.array(chiSq_time_std)[keep_inds]
    time_std_range_centered = time_std_range_centered[keep_inds]
    time_std_temps = time_std_temps[keep_inds]
    time_std_ints = time_std_ints[keep_inds]
    #time_std_range_centered *= 1000
    popt, pcov = curve_fit(fitFxn, time_std_range_centered, chiSq_time_std, 
        [1, 0, np.amin(chiSq_time_std)])
    fit_t0 = time_std_range[mInd] - popt[1]
    fit_t0_std = 1./np.sqrt(popt[0])
    fit_t0_min_chiSq = popt[2]
    print("FIT RESULTS", best_temp, best_ints,
        "{} +/- {}".format(fit_t0, fit_t0_std))

    
    #####  Saving Fitting Results  #####
    # Saving fit landscape
    with h5py.File(fit_landscape_fileName, 'w') as h5:
      # Temperature and Intensity Fit
      h5.create_dataset("fit_coeffs", data=all_fit_coeffs)
      h5.create_dataset("temp_ints_chiSq", data=chiSq)
      h5.create_dataset("temp_ints_min_chiSq", data=best_fit_chiSq)
      h5.create_dataset("temp_ints_t0", data=best_start_times)
      h5.create_dataset("temperatures", data=temperatures)
      h5.create_dataset("best_temperature", data=best_temp)
      h5.create_dataset("intensities", data=intensities)
      h5.create_dataset("best_intensity", data=best_ints)
      # T0 fit given best temperature and intensity
      h5.create_dataset("t0_chiSq", data=chiSq_time_std)
      h5.create_dataset("t0_fit_times", data=time_std_range_centered)
      h5.create_dataset("t0_min_chiSq", data=popt[2])
      h5.create_dataset("t0_time_offset", data=popt[1])
      h5.create_dataset("t0_temps_chiSq", data=time_std_temps)
      h5.create_dataset("t0_ints_chiSq", data=time_std_ints)
      h5.create_dataset("t0", data=fit_t0)
      h5.create_dataset("t0_std", data=fit_t0_std)


    ############################################################
    #####  Get Fit Coefficients with Optimal Temp/Ints/t0  #####
    ############################################################

    # Get full dataset
    FLAGS.dom_cut_low, FLAGS.dom_cut_high = None, None
    data, doms, sem, _, sn_map = get_data(FLAGS)
    dom, plt_dom, _ = doms
    weights = 1./sem**2

    # Get optimal bases
    folderName = os.path.join(
              FLAGS.basis_dir, FLAGS.experiment, FLAGS.basis_sub_dir,
              FLAGS.experiment + "_%8.6f_%8.6f_"\
                  % (best_temp, best_ints)\
                  + fName_suffix)

    all_bases_orig, norms, bases_orig = get_bases(
        FLAGS, fit_lgInds, data.shape,
        normalize=False, folderName=folderName)

    bases_interp      = interp1d(sim_times, bases_orig)
    all_bases_interp  = interp1d(sim_times, all_bases_orig)
    times = np.arange(data.shape[-1])*FLAGS.fit_delta_time\
        + fit_t0 + FLAGS.t_cut_low*FLAGS.fit_delta_time
    plt_times = np.concatenate([times, np.array([2*times[-1] - times[-2]])]) 
    
    bases = bases_interp(times)
    all_bases = all_bases_interp(times)

    bases_norm = copy.copy(bases)
    bases_norm[1:] -= np.expand_dims(np.mean(bases[1:], axis=-1), axis=-1)
    norms = np.sqrt(np.sum(bases**2, axis=-1))
    bases_norm = bases_norm/np.expand_dims(norms, axis=-1)
    
    all_bases_norm      = copy.copy(all_bases)
    all_bases_norm[1:] -= np.expand_dims(
        np.mean(all_bases[1:], axis=-1), axis=-1)
    all_norms = np.sqrt(np.sum(all_bases**2, axis=-1))
    all_bases_norm = all_bases_norm/np.expand_dims(all_norms, axis=-1)

    # Fit for optimal C coefficients
    
    fit_coeffs = normal_equation_reg(
        bases.transpose((0,2,1)),
        data,
        weights,
        FLAGS.reg_scale)

    # If bases l<0 then there are multiple l=0 bases
    singular_inds = False
    if FLAGS.setBases is not None:
      singular_inds = []
      for lg in fit_lgInds:
        inds = (lg + FLAGS.setBases) < 0 
        singular_inds.append(inds)
        if np.any(inds):
          iind = 0
          while iind < len(inds) and inds[iind]:  # check bounds before indexing
            iind += 1
            fit_coeffs[lg,:,iind] += fit_coeffs[lg,:,inds]
          fit_coeffs[lg,:,inds] = 0

    dof = bases.shape[-1] - bases.shape[0]
    if np.any(singular_inds):
      #for lg in data.shape[0]:
      m = np.einsum('iab,ijb,icb->ijac', bases, weights, bases)*dof
      fit_coeffs_cov = np.linalg.inv(m)

    else:
      m = np.einsum('iab,ijb,icb->ijac', bases, weights, bases)*dof
      fit_coeffs_cov = np.linalg.inv(m)


    # Saving fit parameters
    with h5py.File(fit_results_fileName, 'w') as h5:
      h5.create_dataset("legendre_inds", data=fit_lgInds)
      h5.create_dataset("fit_axis", data=dom)
      h5.create_dataset("fit_coeffs", data=fit_coeffs)
      h5.create_dataset("fit_coeffs_cov", data=fit_coeffs_cov)
      h5.create_dataset("fit_bases", data=bases)
      h5.create_dataset("temperature", data=best_temp)
      h5.create_dataset("intensity", data=best_ints)
      h5.create_dataset("t0", data=fit_t0)
      h5.create_dataset("t0_std", data=fit_t0_std)


  #################################
  #####  Analyze Fit Results  #####
  #################################

  with h5py.File(fit_landscape_fileName, 'r') as h5:
    temperatures  = h5["temperatures"][:]
    intensities   = h5["intensities"][:]
    chiSq         = h5["temp_ints_chiSq"][:]
    all_fit_coeffs  = h5["fit_coeffs"][:]
    best_start_times = h5["temp_ints_t0"][:]
    t0_chiSq      = h5["t0_chiSq"][:]
    t0_fit_times  = h5["t0_fit_times"][:]
    t0_time_shift = h5["t0_time_offset"][...]
    t0_min_chiSq  = h5["t0_min_chiSq"][...]
    t0_temps_chiSq= h5["t0_temps_chiSq"][:]
    t0_ints_chiSq = h5["t0_ints_chiSq"][:]
    fit_t0_chiSq  = h5["t0_chiSq"][:]
  with h5py.File(fit_results_fileName, 'r') as h5:
    fit_bases       = h5["fit_bases"][:]
    fit_coeffs      = h5["fit_coeffs"][:]
    fit_coeffs_cov  = h5["fit_coeffs_cov"][:]
    fit_temperature = h5["temperature"][...]
    fit_intensity   = h5["intensity"][...]
    fit_t0          = h5["t0"][...]
    fit_t0_std      = h5["t0_std"][...]

  if not (len(temperatures) == chiSq.shape[0]\
      and len(intensities) == chiSq.shape[1]):
    raise RuntimeError("Shapes do not align {} {} {}".format(
        len(temperatures), len(intensities), chiSq.shape))
  
  # Get full dataset
  FLAGS.dom_cut_low = FLAGS.plot_dom_cut_low
  FLAGS.dom_cut_high = FLAGS.plot_dom_cut_high
  data, doms, sem, _, sn_map = get_data(FLAGS)
  dom, plt_dom, dom_filter = doms
  weights = 1./sem**2
  fit_coeffs = fit_coeffs[:,dom_filter,:]
  fit_coeffs_cov = fit_coeffs_cov[:,dom_filter,:,:]

  scale_dom = np.ones_like(dom)
  if FLAGS.experiment == "UED":
    with h5py.File("simulations/UED/N2O_sim_diffraction-azmAvg_align-random_Qmax-12.88.h5", "r") as h5:
      scale_dom = 1./h5["atm_diffraction"][dom_filter]


  # Get optimal bases
  folderName = os.path.join(
            FLAGS.basis_dir, FLAGS.experiment, FLAGS.basis_sub_dir,
            FLAGS.experiment + "_%8.6f_%8.6f_"\
                % (fit_temperature, fit_intensity)\
                + fName_suffix)

  all_bases_orig, _, bases_orig = get_bases(
      FLAGS, fit_lgInds, data.shape,
      normalize=False, folderName=folderName)

  bases_interp      = interp1d(sim_times, bases_orig)
  all_bases_interp  = interp1d(sim_times, all_bases_orig)
  times = np.arange(data.shape[-1])*FLAGS.fit_delta_time\
      + fit_t0 + FLAGS.t_cut_low*FLAGS.fit_delta_time
  plt_times = np.concatenate([times, np.array([2*times[-1] - times[-2]])]) 
  
  bases = bases_interp(times)
  all_bases = all_bases_interp(times)

  bases_norm = copy.copy(bases)
  bases_norm[1:] -= np.expand_dims(np.mean(bases[1:], axis=-1), axis=-1)
  norms = np.sqrt(np.sum(bases**2, axis=-1))
  bases_norm = bases_norm/np.expand_dims(norms, axis=-1)
  
  all_bases_norm      = copy.copy(all_bases)
  all_bases_norm[1:] -= np.expand_dims(
      np.mean(all_bases[1:], axis=-1), axis=-1)
  all_norms = np.sqrt(np.sum(all_bases**2, axis=-1))
  all_bases_norm = all_bases_norm/np.expand_dims(all_norms, axis=-1)


  # Get fit results
  fit = np.matmul(fit_coeffs, fit_bases)
  fit_coeffs_norm = fit_coeffs*np.expand_dims(norms, axis=-1)


  h5_output = h5py.File(fit_output_fileName, 'w')

  #####  Plotting  #####
  if FLAGS.setBases is not None:
    plot_folder = os.path.join("plots", FLAGS.experiment,
        FLAGS.basis_sub_dir, "setBases")
  else:
    plot_folder = os.path.join("plots", FLAGS.experiment,
        FLAGS.basis_sub_dir)

  if not os.path.exists(plot_folder):
    os.makedirs(plot_folder)

  # Plotting Bases Overlap
  overlap = np.matmul(all_bases_norm, all_bases_norm.transpose())
  h5_output.create_dataset("basis_overlap", data=overlap)
  fig, ax = plt.subplots()
  X,Y = np.meshgrid(
      np.arange(all_bases_norm.shape[0]+1),
      np.arange(all_bases_norm.shape[0]+1))
  pcm = ax.pcolormesh(X, Y, overlap, vmin=-1, vmax=1, cmap='seismic')
  ax.set_xticks(np.arange(all_bases_norm.shape[0]) + 0.5)
  ax.set_xticklabels(np.arange(all_bases_norm.shape[0])*2)
  ax.set_yticks(np.arange(all_bases_norm.shape[0]) + 0.5)
  ax.set_yticklabels(np.arange(all_bases_norm.shape[0])*2)
  fig.colorbar(pcm, ax=ax)  
  fig.savefig(os.path.join(plot_folder,
      "bases_overlap.png"))
  plt.close()

  # Plotting Chi Squared
  logging.info("Plotting Chi Squared")
  fig, ax = plt.subplots()
  plt_intensities = np.concatenate(
      [intensities, np.array([2*intensities[-1] - intensities[-2]])])
  plt_temperatures = np.concatenate(
      [temperatures, np.array([2*temperatures[-1] - temperatures[-2]])])
  X,Y = np.meshgrid(plt_intensities, plt_temperatures)
  pcm = ax.pcolormesh(X, Y, chiSq[:,:])#, vmax = 0.97*np.amax(chiSq))
  #pcm = ax.pcolormesh(X, Y, chiSq[:,:], norm=colors.LogNorm())#, cmap="binary")
  fig.colorbar(pcm, ax=ax)  
  #ax.set_xlim([1, 8])
  #ax.set_ylim([40, 120])
  ax.set_xlabel('Laser Intensity [$10^{12} W/cm^2$]')
  ax.set_ylabel("Temperature [K]")
  if FLAGS.experiment == "UED":
    print("skipping")
    """
    pcm.set_clim([2.5e-2, 6e-2])
    axins = ax.inset_axes([0.6, 0.5, 0.37, 0.37])
    pcm_ins = axins.pcolormesh(X, Y, chiSq[:,:], vmax=2.9e-2)
    #    norm=colors.LogNorm())#, cmap="binary")
    axins.set_xlim([2, 5])
    axins.set_ylim([70, 100])
    #axins.tick_params(axis='x', colors='w')
    #axins.tick_params(axis='y', colors='w')
    ax.indicate_inset_zoom(axins)
    cb = fig.colorbar(pcm_ins, ax=axins,
        location="top", anchor=(5,5))
    cb.minorticks_off()
    cb.set_ticks([2.8e-2, 2.9e-2])
    #cb.ax.yaxis.set_tick_params(color='w')
    #cb.ax.xaxis.set_tick_params(color='w')
    #plt.setp(plt.getp(cb.ax.axes, 'yticklabels'), color='w')
    #plt.setp(plt.getp(cb.ax.axes, 'xticklabels'), color='w')

    """
  """
  else:
    ax.plot(intensities, chiSq[0,:])
    ax.set_xlim([intensities[0], intensities[-1]])
    ax.set_xlabel('Laser Intensity [$10^{12} W/cm^2$]')
    ax.set_ylabel(r'$\chi^2$')
    ax.text(0.45, 0.95, "Best Fit", fontweight='bold', transform=ax.transAxes)
    ax.text(0.45, 0.9,
        "Laser Intensity: {0:.3g}".format(fit_params["intensity"])\
          + "*$10^{12} W/cm^2$",
        transform=ax.transAxes)
  """
   
  ax.plot(fit_intensity, fit_temperature, 'wo', markersize=3)

  plt.tight_layout()
  print("saving in",os.path.join(plot_folder, "chi_square{}.png".format(suffix)))
  fig.savefig(os.path.join(plot_folder,
      "chi_square{}.png".format(suffix)))
  plt.close()
  

  print("MIN MAX", np.amin(best_start_times),np.amax(best_start_times))
  plt.pcolormesh(
      X, Y, best_start_times,
      vmin=np.amin(best_start_times), vmax=np.amax(best_start_times))
  plt.colorbar()
  plt.xlabel("Laser Intensity [$10^{12} W/cm^2$]")
  plt.ylabel("Temperature [K]")
    
  plt.savefig(os.path.join(plot_folder,
      "start_times{}.png".format(suffix)))
  plt.close()

  # Residuals from the best fit
  logging.info("Plotting Residuals")

  residuals = data - fit
  h5_output.create_dataset("residuals", data=residuals)

  plt_scales = None
  if FLAGS.experiment == "UED":
    plt_scales = [70, 3, 1.5, 1, 0.5, 0.3]
  if FLAGS.experiment == "LCLS":
    plt_scales = np.array([0.01, 0.05, 0.05, 0.5, 0.5, 0.5, 0.5])*0.12

  X,Y = np.meshgrid(plt_times, plt_dom)
  for i in range(fit.shape[0]):

    plt.pcolormesh(X, Y, fit[i,:,:],
          cmap='seismic')
    plt.colorbar()
    plt.savefig(os.path.join(plot_folder,\
        "fit{}_L-{}.png".format(i*2, suffix)))
    plt.close()
    if plt_scales is not None:
      plt.pcolormesh(X, Y, residuals[i,:,:],
          vmin=-1*plt_scales[i], vmax=plt_scales[i],
          cmap='seismic')
    else:
      plt.pcolormesh(X, Y, residuals[i,:,:],
          cmap='seismic')

    plt.colorbar()
    plt.xlabel("Time [ps]")
    if FLAGS.experiment == "UED":
      plt.ylabel('Q [$\AA^{-1}$]')
    elif FLAGS.experiment == "LCLS":
      plt.ylabel('Energy [eV]')
    plt.savefig(os.path.join(plot_folder,\
        "residuals{}_L-{}.png".format(i*2, suffix)))
    plt.close()

    """
    for j in range(data.shape[1]):
      plt.plot(times, data[i,j,:], '-k')
      plt.plot(times, fit[i,j,:], '-b')
      plt.plot(times, bases_norm[1,:]*fit_coeffs_norm[i,j,1], '-g')
      plt.savefig(os.path.join(plot_folder,
          "{}_LOfit_l{}_e{}{}.png".format(FLAGS.experiment, i, j, suffix)))
      plt.close()
    """

  #plt.plot(data[1,10,:])
  #plt.plot(fit[1,10,:])
  #plt.savefig("testingFit.png")
  #plt.close()


  # Analyze residuals as a goodness of fit
  plt_range = None
  if FLAGS.experiment == "UED":
    plt_range = [[10**1, 10**6], [1, 3*10**2], [1, 5*10**2], [1, 10], [0.5, 5], [0.5, 2]]
  elif FLAGS.experiment == "LCLS":
    plt_range = [[10**3, 10**5], [10**3, 10**5], [10**3, 10**5],
        [10**3, 10**5], [5, 7*10**2], [2, 3*10**2]]

  residuals_fft = fft(residuals, axis=-1)[:,:,:int(residuals.shape[-1]/2+1)]
  residuals_pow = np.absolute(residuals_fft)**2
  X,Y = np.meshgrid(np.arange(residuals_pow.shape[-1]+1), plt_dom)
  for i in range(fit.shape[0]):
    if plt_range is not None:
      plt.pcolormesh(X, Y, residuals_pow[i,:,:],
          #vmin=plt_range[i][0], vmax=plt_range[i][1],
          norm=colors.LogNorm(),
          cmap="Blues")
    else:
      plt.pcolormesh(X, Y, residuals_pow[i,:,:],
          norm=colors.LogNorm(),
          cmap="Blues")

    plt.colorbar()
    plt.xlim([0, residuals_pow.shape[-1]-1])
    plt.xlabel("Frequency [2$\pi$/L]")
    if FLAGS.experiment == "UED":
      plt.ylabel('Q [$\AA^{-1}$]')
    elif FLAGS.experiment == "LCLS":
      plt.ylabel('Energy [eV]')
    plt.savefig(os.path.join(plot_folder,\
        "residuals_power{}_L-{}.png".format(i*2, suffix)))
    plt.close()

    plt.plot(np.sum(residuals_pow[i], axis=0))
    plt.xlim([0, residuals_pow.shape[-1]-1])
    plt.xlabel("Frequency [2$\pi$/L]")
    #if plt_range is not None:
    #  plt.ylim(plt_range[i])
    plt.ylabel('Power')
    plt.yscale('log')
    plt.savefig(os.path.join(plot_folder,\
        "residuals_power_sum_L-{}{}.png".format(i*2, suffix)))
    plt.close()
  
  # Plot fit power spectrum
  power = fit_coeffs_norm**2
  plt_range = None
  if FLAGS.experiment == "UED":
    plt_range = [[10**4, 3*10**7], [10**2, 2*10**5], [8*10**1, 6*10**3],
        [10, 2*10**3], [5, 7*10**2], [2, 3*10**2]]
  for lg in range(power.shape[0]):
    for i in range(power.shape[-1]):
      plt.plot(dom, power[lg,:,i], label="Basis {}".format(i*2))
    plt.xlim(dom[0], 2*dom[-1]-dom[-2])
    if FLAGS.experiment == "UED":
      plt.xlabel('Q [$\AA^{-1}$]')
    elif FLAGS.experiment == "LCLS":
      plt.xlabel('Energy [eV]')
    plt.ylabel('Power')
    plt.yscale('log')
    plt.legend()
    plt.savefig(os.path.join(plot_folder,\
        "full_power_spectrum_basis-{}{}.png".format(
          2*lg, suffix)))
    plt.close()

    sum_power = np.sum(power[lg,:,:], axis=0)
    plt.bar(np.arange(len(sum_power)), sum_power, align='center')
    plt.xticks(np.arange(len(sum_power)), np.arange(len(sum_power))*2)
    plt.xlabel('Bases')
    if plt_range is not None:
      plt.ylim(plt_range[lg])
    plt.ylabel('Power')
    plt.yscale('log')
    plt.savefig(os.path.join(plot_folder,\
        "power_spectrum_scaled_basis-{}{}.png".format(
          2*lg, suffix)))
    plt.close()

    plt.bar(np.arange(len(sum_power)), sum_power, align='center')
    plt.xticks(np.arange(len(sum_power)), np.arange(len(sum_power))*2)
    plt.xlabel('Bases')
    plt.ylabel('Power')
    plt.yscale('log')
    plt.savefig(os.path.join(plot_folder,\
        "power_spectrum_basis-{}{}.png".format(
          2*lg, suffix)))
    plt.close()


  # Fit coefficients
  cov_inds = np.arange(fit_coeffs_cov.shape[-1])
  for i,(lg_C, lg_CN) in enumerate(zip(fit_coeffs, fit_coeffs_norm)):
    if FLAGS.setBases is not None:
      X,Y = np.meshgrid(
          np.arange(lg_CN.shape[-1]+1)[:-1] + i + FLAGS.setBases, plt_dom)
    else:
      X,Y = np.meshgrid(fit_lgInds, plt_dom)
    #si = np.argmax(np.abs(lg_C), axis=0)
    #scales = np.abs(lg_CN[si, np.arange(FLAGS.Nbases)])
    scale = np.amax(np.abs(lg_CN))
    fig, ax = plt.subplots()
    lbl = "Time Basis: {}"
    if lg_CN.shape[-1] == 1:
      print(scale_dom.shape, np.sqrt(fit_coeffs_cov[i,:,0]).shape)
      ax.errorbar(plt_dom[:-1], lg_CN[:,0]*scale_dom, fmt='o', color='k', 
          yerr=np.sqrt(fit_coeffs_cov[i,:,0])*np.expand_dims(scale_dom, -1),
          ecolor='gray', label=lbl.format(fit_lgInds[i]))
      if FLAGS.experiment == "UED":
        ax.set_xlabel("q $[\AA^{-1}]$")
      elif FLAGS.experiment == "LCLS":
        ax.set_xlabel("Energy [eV]")
 
    else:
      for j in range(lg_CN.shape[-1]):
        if FLAGS.setBases is not None:
          label = lbl.format(FLAGS.setBases[j] + fit_lgInds[i])
        else:
          label = lbl.format(fit_lgInds[i])
        ax.errorbar(plt_dom[:-1]*scale_dom, lg_CN[:,0],
            yerr=np.sqrt(fit_coeffs_cov[i,:,j])*scale_dom, label=label) 
        #ax.plot(plt_dom[:-1], lg_CN[:,j], label=label)
      #pcm = ax.pcolormesh(X, Y, lg_CN,#/np.expand_dims(scales, axis=0),
      #    cmap='seismic', vmax=scale, vmin=-1*scale)
      #    #norm=colors.SymLogNorm(linthresh=1e-4, linscale=1e-2,
      #    #  vmax=scale, vmin=-1*scale))
      ax.set_xlabel("Basis [L]")
      if FLAGS.experiment == "UED":
        ax.set_ylabel("q $[\AA^{-1}]$")
      elif FLAGS.experiment == "LCLS":
        ax.set_ylabel("Energy [eV]")
    
    if lg_CN.shape[-1] == 1:
      ax.set_xlim([plt_dom[0], plt_dom[-2]])
    else:
      ax.set_xlim([plt_dom[0], plt_dom[-2]])
      #fig.colorbar(pcm, ax=ax)
      #ax.set_xticks(np.arange(FLAGS.Nbases)*2)
    ax.legend()


    """
    yt = plt_dom[-1] + 0.05*(plt_dom[-1] - plt_dom[0])
    for ii, scl in enumerate(scales):
      ax.text(ii*2, yt, r"$\times$ {0:.5g}".format(scl),
          horizontalalignment='center')
    """

    plt.tight_layout()
    fig.savefig(os.path.join(plot_folder,
        "fit_coefficients_lg-{}.png".format(fit_lgInds[i])))
    plt.close()


    # Coefficient Signal to Noise
    SN = np.abs(lg_C)\
        /np.sqrt(np.abs(fit_coeffs_cov[i,:,cov_inds,cov_inds])).transpose()
    #h5_output.create_dataset("fit_coeff_signal2noise", data=SN)

    """
    fig_sn, ax_sn = plt.subplots()
    print("SN", SN)
    pcm_sn = ax_sn.pcolormesh(X, Y, SN,
        norm=colors.LogNorm())
    #cmap='seismic', vmax=scale, vmin=-1*scale)
    ax_sn.set_xlabel("Basis [L]")
    if FLAGS.experiment == "UED":
      ax_sn.set_ylabel("Q $[\AA^{-1}]$")
    elif FLAGS.experiment == "LCLS":
      ax_sn.set_ylabel("Energy [eV]")
    fig_sn.colorbar(pcm_sn, ax=ax_sn)
    ax_sn.set_xticks(np.arange(FLAGS.Nbases)*2)
    
    plt.tight_layout()
    fig_sn.savefig(os.path.join(plot_folder,
        "fit_coefficients_SN{}_lg-{}.png".format(reg_suffix[rg], 2*i)))
    plt.close()
    """


    fig_sn, ax_sn = plt.subplots()
    if FLAGS.setBases is not None:
      for isn in cov_inds:
        ax_sn.plot(Y[:-1,0], SN[:,isn],
          label="Time Basis {}".format(FLAGS.setBases[isn] + fit_lgInds[i]))
    else:
      for isn in cov_inds[1:]:
        ax_sn.plot(Y[:-1,0], SN[:,isn], label="Time Basis {}".format(fit_lgInds[isn]))
    ax_sn.legend()
    ax_sn.set_yscale('log')
    ax_sn.set_xlim([Y[0,0], Y[-1,0]])
    ax_sn.set_ylabel("Signal to Noise")
    if FLAGS.experiment == "UED":
      ax_sn.set_xlabel('Q [$\AA^{-1}$]')
    elif FLAGS.experiment == "LCLS":
      ax_sn.set_xlabel('Energy [eV]')
    fig_sn.tight_layout()
    fig_sn.savefig(os.path.join(
        plot_folder, "fit_coefficients_SN_LO_lg-{}.png".format(fit_lgInds[i])))
    plt.close()



  """
  for i in range(res.shape[1]):
    plt.plot(res[1,i,:])
    plt.savefig("plots/testingCoeff{}.png".format(i))
    plt.close()

    print("sssss",bases.shape,res.shape)
    fit = np.matmul(bases.transpose(), res[1,i,:])
    plt.plot(data[1,i,:])
    plt.plot(fit)
    plt.savefig("plots/testingFig{}.png".format(i))
    plt.close()
  """

  """
  testSum = np.sum(data[1,10:23,:], axis=0)
  res = normal_equation(bases.transpose(), testSum, np.ones(40))
  print(res)

  plt.plot(testSum)
  plt.plot(np.matmul(bases.transpose(), res))
  plt.savefig("plots/testSum.png")
  plt.close()
  """
  #####  Plotting Time STD Measurement  #####
  #with open(time_std_fileName, "rb") as file:
  #  save_time_results = pl.load(file)
      
  #chiSq_time_std, time_std_range,\
  #time_std_temps, time_std_intss = save_time_results
  #  t0_chiSq      = h5["t0_chiSq"]
  #  t0_fit_times  = h5["t0_fit_std_times"]
  #  t0_temps_chiSq= h5["t0_temps_chiSq"]
  #  t0_ints_chiSq = h5["t0_ints_chiSq"]

  #time_std_range_centered = np.array(time_std_range) - fit_t0
  #keep_inds = np.where((time_std_range_centered > -1*FLAGS.range_STD_time_cut) 
  #    & (time_std_range_centered < FLAGS.range_STD_time_cut))[0]
  #t0_chiSq = np.array(t0_chiSq)[keep_inds]
  #t0_fit_times = t0_fit_times[keep_inds]

  def fitFxn(x, c, t0, off):
    return c*(x + t0)**2 + off

  fig, ax = plt.subplots()
  ax.plot(t0_fit_times - t0_time_shift, t0_chiSq, '.k')
  ax.plot(t0_fit_times - t0_time_shift, 
      fitFxn(t0_fit_times, 
        (1./fit_t0_std)**2, 
        t0_time_shift, t0_min_chiSq), '-b')
  ax.set_xlim([t0_fit_times[0], t0_fit_times[-1]])
  ax.set_xlabel("Time [ps]")
  ax.set_ylabel('$\chi^2$')
  ax.text(0.5, 0.9, "Best Fit: {0:.6g} $\pm$ {1:.3g} ps".format(
        fit_t0, fit_t0_std),
      fontsize=15, fontweight='bold',
      transform=ax.transAxes, ha='center', va='center')
  plt.tight_layout()
  fig.savefig(os.path.join(plot_folder,
      "time_std_chiSq{}.png".format(suffix)))
  plt.close()

  fig, ax1 = plt.subplots()

  ax1.set_xlabel("Time [ps]")
  ax1.set_ylabel("Temperature [K]", color="r")
  #ax1.plot(time_std_range - np.mean(np.array(time_std_range)),
  print(t0_fit_times.shape, t0_temps_chiSq.shape)
  ax1.plot(t0_fit_times, t0_temps_chiSq, '-r')

  ax2 = ax1.twinx()
  ax2.set_ylabel("Intensity $[10^{12} W/cm^2]$")
  #ax2.plot(time_std_range - np.mean(np.array(time_std_range)),
  ax2.plot(t0_fit_times, t0_ints_chiSq, '-k')
  fig.savefig(os.path.join(plot_folder, "time_std_temp_ints{}.png".format(suffix)))
  plt.close()

  h5_output.close()


  sys.exit(0)
  ################################
  #####  Calculate Time STD  #####
  ################################


  time_std_temperatures = temperatures
  time_std_intensities  = intensities
  """
  if FLAGS.experiment == "UED":
    time_std_intensities = np.concatenate([
        np.arange(0.05, 1.95, 0.05),
        np.arange(2, 
          FLAGS.ints_high + FLAGS.ints_delta/2,
          FLAGS.ints_delta)])
  """

  time_std_chiSq_fileName = os.path.join(output_prefix,
      "{}_time_std_chiSq{}.pl".format(FLAGS.experiment, suffix))
  chiSq_time_std, time_std_range, time_std_temps, time_std_intss =\
      [], [], [], []

  if os.path.exists(time_std_chiSq_fileName):
    with open(time_std_chiSq_fileName, "rb") as file:
      chiSq_time_std, time_std_range, time_std_temps, time_std_intss =\
          pl.load(file)
  else:

    std_times = np.arange(fit_params["start_time"] - FLAGS.range_STD_time,
        fit_params["start_time"] + FLAGS.range_STD_time + FLAGS.step_STD_time/2,
        FLAGS.step_STD_time)
    min_temp_ind = np.argmin(np.abs(fit_params["temp"] - time_std_temperatures))
    min_ints_ind = np.argmin(np.abs(fit_params["intensity"] - time_std_intensities))
    data_weights_std = np.zeros_like(data_weights)
    data_weights_std[:,1,:] = 1.
  
    pool = Pool(processes=1)
    results = pool.map(partial(time_std_pool,
        data=data,
        sem=sem,
        weights=weights,
        best_temp_ind=fit_params["temp_ind"],
        best_ints_ind=fit_params["ints_ind"],
        data_weights=data_weights_std,
        reg_scale=FLAGS.reg_scale,
        temperatures=time_std_temperatures,
        intensities=time_std_intensities,
        sim_times=sim_times,
        time_delta=FLAGS.fit_delta_time,
        t_cut_low=FLAGS.t_cut_low,
        t_cut_high=FLAGS.t_cut_high,
        experiment=FLAGS.experiment,
        basis_dir=FLAGS.basis_dir,
        fName_suffix=fName_suffix,
        fit_lgInds=fit_lgInds),
        std_times)

    for res in results:
      time, minChiSq, minInds = res
      time_std_range.append(time)
      chiSq_time_std.append(minChiSq)
      time_std_temps.append(time_std_temperatures[minInds[0]])
      time_std_intss.append(time_std_intensities[minInds[1]])

    saved_results = [chiSq_time_std, time_std_range, time_std_temps, time_std_intss]
    # Save ChiSq
    with open(time_std_chiSq_fileName, "wb") as file:
      pl.dump(saved_results, file)

  # Fitting for time std

  def fitFxn(x, c, t0, off):
    return c*(x + t0)**2 + off

  mInd = np.argmin(chiSq_time_std)
  time_std_range_centered = np.array(time_std_range) - time_std_range[mInd]
  keep_inds = np.where((time_std_range_centered > -1*FLAGS.range_STD_time_cut) 
      & (time_std_range_centered < FLAGS.range_STD_time_cut))[0]
  chiSq_time_std = np.array(chiSq_time_std)[keep_inds]
  time_std_range_centered = time_std_range_centered[keep_inds]
  #time_std_range_centered *= 1000
  popt, pcov = curve_fit(fitFxn, time_std_range_centered, chiSq_time_std, 
      [1, 0, np.amin(chiSq_time_std)])
  print("BEST RESULTS")
  print("\tBest Time: {} +/- {}".format(time_std_range[mInd], 1./np.sqrt(popt[0])))
  print(popt)
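  # Added aside (not part of the original analysis): the quoted 1-sigma error
  # falls out of the parabola's curvature, since chiSq = c*(x + t0)**2 + off
  # rises by one above its minimum at |x - x_min| = 1/sqrt(c). A quick
  # synthetic check of that relation using the same fitFxn:
  _tt = np.linspace(-1.0, 1.0, 51)
  _sigma_true = 0.2
  _chi2_demo = (_tt / _sigma_true)**2 + 5.0  # parabola with curvature 1/sigma^2
  _popt_demo, _ = curve_fit(fitFxn, _tt, _chi2_demo, [1, 0, np.amin(_chi2_demo)])
  assert np.isclose(1./np.sqrt(_popt_demo[0]), _sigma_true)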


  #####  Plotting  #####
  fig, ax = plt.subplots()
  ax.plot(time_std_range_centered, chiSq_time_std, '.k')
  ax.plot(time_std_range_centered, fitFxn(time_std_range_centered, *popt), '-b')
  ax.set_xlim([time_std_range_centered[0], time_std_range_centered[-1]])
  ax.set_xlabel("Time [ps]")
  ax.set_ylabel(r'$\chi^2$')
  ax.text(0.5, 0.9, r"Best Fit: {0:.6g} $\pm$ {1:.3g} ps".format(
        time_std_range[mInd], 1./np.sqrt(popt[0])),
      fontsize=15, fontweight='bold',
      transform=ax.transAxes, ha='center', va='center')
  plt.tight_layout()
  fig.savefig(os.path.join(plot_folder, "time_std_chiSq{}.png".format(suffix)))
  plt.close()

  fig, ax1 = plt.subplots()

  ax1.set_xlabel("Time [ps]")
  ax1.set_ylabel("Temperature [K]", color="r")
  #ax1.plot(time_std_range - np.mean(np.array(time_std_range)),
  ax1.plot(time_std_range,
      time_std_temps, '-r')

  ax2 = ax1.twinx()
  ax2.set_ylabel("Intensity $[10^{12} W/cm^2]$")
  #ax2.plot(time_std_range - np.mean(np.array(time_std_range)),
  ax2.plot(time_std_range,
      time_std_intss, '-k')
  fig.savefig(os.path.join(plot_folder, "time_std_temp_ints{}.png".format(suffix)))
  plt.close()
Example #32
def harmonic_vals(classname, uri, channel, param_set, low, high, plot=False):
    """harmonic_vals: Test first five harmonics and check to be within
    certain intervals. This test also requires a devices with TX and RX
    onboard where thetransmit signal can be recovered.Sinuoidal data is
    passed to DMAs, which is then estimated on the RX side.

    parameters:
        uri: type=string
            URI of IIO context of target board/system
        classname: type=string
            Name of pyadi interface class which contain attribute
        channel: type=list
            List of integers or list of list of integers of channels to
            enable through tx_enabled_channels
        param_set: type=dict
            Dictionary of attribute and values to be set before tone is
            generated and received
        low: type=list
            List of minimum values for certain harmonics
        high: type=list
            List of maximum values for certain harmonics
        plot: type=boolean
            Boolean, if set the values are also plotted
    """
    sdr = eval(classname + "(uri='" + uri + "')")
    for p in param_set.keys():
        setattr(sdr, p, param_set[p])

    time.sleep(3)

    N = 2**15
    sdr.tx_cyclic_buffer = True
    sdr.tx_enabled_channels = [channel]
    sdr.tx_buffer_size = N * len(sdr.tx_enabled_channels)
    sdr.rx_enabled_channels = [channel]
    sdr.rx_buffer_size = N * len(sdr.rx_enabled_channels)

    ref = 2**12

    if hasattr(sdr, "sample_rate"):
        RXFS = int(sdr.sample_rate)
    else:
        RXFS = int(sdr.rx_sample_rate)

    fc = RXFS * 0.1
    fc = int(fc / (RXFS / N)) * (RXFS / N)

    full_scale = 0.9
    ts = 1 / float(RXFS)
    t = np.arange(0, N * ts, ts)
    i = np.cos(2 * np.pi * t * fc) * ref * full_scale
    q = np.sin(2 * np.pi * t * fc) * ref * full_scale
    iq = i + 1j * q

    try:
        sdr.tx(iq)
        time.sleep(3)
        for _ in range(10):
            data = sdr.rx()
    except Exception as e:
        del sdr
        raise Exception(e)
    del sdr
    time.sleep(3)

    L = len(data)

    ampl = 1 / L * np.absolute(fft(data))
    ampl = 20 * np.log10(ampl / ref + 10**-20)

    freqs = fftfreq(L, 1 / RXFS)

    _, ml, hm, indxs = spec.find_harmonics(fftshift(ampl),
                                           fftshift(freqs),
                                           num_harmonics=10,
                                           tolerance=0.2)

    ffreqs = fftshift(freqs)
    ffampl = fftshift(ampl)
    if plot:
        import matplotlib.pyplot as plt

        plt.subplot(2, 1, 1)
        plt.plot(data, ".-")
        plt.plot(1, 1, "r.")
        plt.margins(0.1, 0.1)
        plt.xlabel("Time [s]")

        plt.subplot(2, 1, 2)
        plt.plot(fftshift(freqs), fftshift(ampl))
        plt.plot(ffreqs[ml], ffampl[ml], "y.")
        plt.plot(ffreqs[indxs[0:len(hm)]], hm[0:len(hm)], "y.")

        plt.margins(0.1, 0.1)
        plt.annotate("Fundamental", (ffreqs[ml], ffampl[ml]))
        plt.xlabel("Frequency [Hz]")
        plt.tight_layout()
        plt.show()

    assert low[0] <= ffampl[ml] <= high[0]
    for i in range(1, len(low)):
        assert low[i] <= hm[i - 1] <= high[i]
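
A hedged usage sketch (added; class name, URI, and bounds below are
illustrative, not from the original, and harmonic_vals only runs against real
TX/RX-capable hardware with the named pyadi-iio class importable in scope):

    param_set = {"tx_hardwaregain_chan0": -10}   # hypothetical attribute
    harmonic_vals(
        "Pluto",              # hypothetical pyadi-iio class name
        "ip:192.168.2.1",     # hypothetical board URI
        channel=0,
        param_set=param_set,
        low=[-20, -100, -100, -100, -100],   # illustrative dBFS lower bounds
        high=[0, -40, -40, -40, -40],        # illustrative dBFS upper bounds
        plot=False,
    )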
Example #33
from numpy.fft import fft
from numpy import array

a = array([1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0])
print(" ".join("%5.3f" % abs(f) for f in fft(a)))
Example #34
sample_rate, noised_sigs = wf.read(
    'C:\\Users\\Administrator\\Desktop\\sucai\\da_data\\noised.wav')
times = np.arange(len(noised_sigs)) / sample_rate

mp.figure("Filter", facecolor='lightgray')
mp.subplot(2, 2, 1)
mp.title("Time Domain", fontsize=16)
mp.ylabel("Signal", fontsize=12)
mp.tick_params(labelsize=8)
mp.grid(linestyle=":")
mp.plot(times[:178], noised_sigs[:178], c='dodgerblue', label='Noised Sigs')
mp.legend()

# Get the audio's frequency-domain information and plot frequency vs. power
freqs = nf.fftfreq(times.size, 1 / sample_rate)
noised_ffts = nf.fft(noised_sigs)
noised_pows = np.abs(noised_ffts)
mp.subplot(222)
mp.title("Frequency Domain", fontsize=16)
mp.ylabel("Power", fontsize=12)
mp.tick_params(labelsize=8)
mp.grid(linestyle=":")
mp.semilogy(freqs[freqs > 0],
            noised_pows[freqs > 0],
            c='orangered',
            label='Noised')
mp.legend()

# Plot the frequency/power spectrum after removing the low-frequency noise
fund_freq = freqs[noised_pows.argmax()]
# Find the indices of all the noise components
Example #35
File: ftan.py Project: jxensing/ftanpy
def ftan(filename,
         freqmin=0.1,
         freqmax=10,
         vmin=1.0,
         vmax=5.0,
         fold=False,
         alpha=10):
    """returns the filename, and computes and saves FTAN amplitudes and group velocities.
    :filename : name of sac file e.g. ABAZ_ETAZ_ZZ.SAC
    :freqmin : minimum frequency for filter
    :freqmax : maximum frequency for filter
    :alpha : adjustable parameter that sets the resolution in the frequency and time domains. Default = 10
    """
    st = read(filename)
    tr = st[0]
    if fold:
        tr = fold_trace(tr)
    samp_rate = tr.stats.sampling_rate  #sampling rate
    samp_int = tr.stats.delta  #sampling interval
    t = np.arange(0, len(tr.data) * samp_int, samp_int)  # time vector
    dist = tr.stats.sac["dist"] / 1000

    if freqmax > samp_rate / 2:
        print("Maximum frequency exceeded the Nyquist frequency")
        print("Freqmax = {}, Nyquist = {}".format(str(freqmax),
                                                  str(samp_rate / 2)))
        print("Maximum frequency reset to {}".format(str(samp_rate / 2)))
        freqmax = samp_rate / 2

    tr_ft = fft.fft(tr.data)  # Fourier transform tr into the frequency domain

    #take the analytic component by...
    tr_af = tr_ft.copy()
    tr_af[:len(tr_af) // 2] *= 2.0  # multiply all the positive frequencies by two
    tr_af[len(tr_af) // 2:] *= 0.0  # zero out all the negative frequencies

    gaussian_filters, frequencies = make_gaussian_filters(tr_ft.real,
                                                          freqmin=freqmin,
                                                          freqmax=freqmax,
                                                          samp_rate=samp_rate,
                                                          dist=dist,
                                                          alpha=alpha)

    filename_amplitudes = ".".join([filename, "amplitudes.csv"])
    headers = [
        "Speed (km/s)", "Centre Period (s)", "Instaneous Period (s)",
        "Amplitude"
    ]

    with open(filename_amplitudes, "w", newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(headers)
        group_speeds = []
        gs_c_period = []
        gs_inst_period = []
        envelope_max = []
        phase_at_group_time = []
        neg_errors = []
        pos_errors = []

        for g_filter in gaussian_filters:
            tr_aff = tr_af * np.array(
                g_filter[1]
            )  #applying the filters to the analytic signal in frequency domain
            tr_at = fft.ifft(
                tr_aff
            )  # inverse Fourier transform of the filtered analytic signal
            envelope = np.log(
                np.absolute(tr_at)**2
            )  # taking the logarithm of the square of the filtered analytic signal in the time domain
            #            phase_function = np.unwrap(np.angle(tr_at)) # compute the unwrapped phase function
            phase_function = np.angle(tr_at)  # compute the phase function
            phase_at_group_time.append(phase_function[np.argmax(
                envelope)])  # save phase at group time
            omega_inst = np.diff(phase_function) / (
                samp_int * 2 * np.pi
            )  # compute instantaneous frequencies for the phase function
            omega_inst = omega_inst[np.argmax(envelope) - 1]

            #            omega_inst = omega_inst[np.argmin(np.abs(omega_inst-g_filter[0]))] # compute instantaneous frequencies
            center_period = np.around(float(1 / g_filter[0][0]), decimals=2)
            instantaneous_period = np.around(float(1 / omega_inst), decimals=2)
            #            phi_s = 0 #source phase is zero for ambient noise cross-correlations

            for i, amplitude in enumerate(envelope.real):
                if t[i] != 0 and vmin < dist / t[i] < vmax:
                    speed = float(dist / t[i])
                    writer.writerow([
                        speed, center_period, instantaneous_period, amplitude
                    ])
                else:
                    pass

            #compute group speeds and related data
            if t[np.argmax(envelope)] != 0 and vmin < dist / t[np.argmax(
                    envelope)] < vmax:
                envelope_max.append(max(envelope))
                group_speeds.append(dist / t[np.argmax(envelope)])
                gs_c_period.append(center_period)
                gs_inst_period.append(instantaneous_period)

                #                Error Analysis
                #upper error
                e_up = envelope[np.argmax(envelope):]
                t_up = t[np.argmax(envelope):]
                amp_up = []
                for i, amp in enumerate(e_up):
                    if amp >= max(envelope) - 0.5:
                        amp_up.append([t_up[i], amp])
                    else:
                        break
                if len(amp_up) > 1:
                    pos_error = dist / amp_up[-1][0] - dist / amp_up[0][0]
                else:
                    pos_error = 10
                if pos_error == float("Inf"):
                    pos_error = 10
                pos_errors.append(abs(pos_error))

                #lower error
                e_dwn = envelope[:np.argmax(envelope) + 1]
                e_dwn = e_dwn[::-1]
                t_dwn = t[:np.argmax(envelope) + 1]
                t_dwn = t_dwn[::-1]
                amp_dwn = []
                for i, amp in enumerate(e_dwn):
                    if amp >= max(envelope) - 0.5:
                        amp_dwn.append([t_dwn[i], amp])
                    else:
                        break

                if amp_dwn[-1][0] != 0 and amp_dwn[0][0] != 0:
                    neg_error = dist / amp_dwn[-1][0] - dist / amp_dwn[0][0]
                else:
                    neg_error = 10
#                print(center_period)
                neg_errors.append(abs(neg_error))

    filename_group_speeds = ".".join([filename, "group_speeds", "csv"])
    headers = [
        "Group Speed (km/s)", "Centre Period (s)", "Instaneous Period (s)",
        "Negative Error", "Postive Error", "time (s)", "distance (km)"
    ]
    # headers=["Group Speed (km/s)","Centre Period (s)"]
    group_speeds, gs_c_period, gs_inst_period, phase_at_group_time = trim(
        group_speeds, gs_c_period, gs_inst_period, envelope_max,
        phase_at_group_time)
    with open(filename_group_speeds, "w", newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(headers)
        for i, group_speed in enumerate(group_speeds):
            writer.writerow([
                group_speed, gs_c_period[i], gs_inst_period[i], neg_errors[i],
                pos_errors[i], dist / group_speed, dist
            ])
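
A hedged usage sketch (added; the SAC filename comes from the docstring
example and must exist on disk with a sac.dist header for this to run):

    ftan("ABAZ_ETAZ_ZZ.SAC", freqmin=0.1, freqmax=1.0, vmin=1.5, vmax=4.5,
         fold=False, alpha=10)
    # writes ABAZ_ETAZ_ZZ.SAC.amplitudes.csv and
    # ABAZ_ETAZ_ZZ.SAC.group_speeds.csv next to the input file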
Example #36
plt.plot(f3, 'g.-')
plt.xlim(0, n_total)
#
plt.xlabel('samples n', size=14)
#========================================================================
#                                 SPECTRA
#------------------------------------------------------------------------
'''
numpy.fft.fft:
When the input a is a time-domain signal and A = fft(a): 
. np.abs(A) is its amplitude spectrum; 
. np.abs(A)**2 is its power spectrum; 
. np.angle(A) is the phase spectrum.
'''
N = 2048
ft1 = fft.fft(f1, N)
ft2 = fft.fft(f2, N)
ft3 = fft.fft(f3, N)
ft1_shifted = fft.fftshift(ft1)
ft2_shifted = fft.fftshift(ft2)
ft3_shifted = fft.fftshift(ft3)
ft1_shifted = ft1_shifted.real
ft2_shifted = ft2_shifted.real
ft3_shifted = ft3_shifted.real
aft1 = np.abs(ft1)
aft2 = np.abs(ft2)
aft3 = np.abs(ft3)
aft1_shifted = abs(ft1_shifted)**2
aft2_shifted = abs(ft2_shifted)**2
aft3_shifted = abs(ft3_shifted)**2
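# Added illustration: per the numpy conventions quoted above, the phase
# spectrum (not otherwise computed in this snippet) is just np.angle:
pha1 = np.angle(ft1)  # phase spectrum of f1
pha2 = np.angle(ft2)
pha3 = np.angle(ft3)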
#========================================================================
Example #37
from scipy.io import wavfile
import math
import matplotlib.pyplot as plot
import numpy.fft as fft
import numpy

sample_freq, sound = wavfile.read('./audio/test3.wav')
sound = sound / (2.**15)
print(sound.shape)

s1 = sound
n = len(s1)
p = fft.fft(s1)
nu = math.ceil((n + 1) / 2.0)
p = p[0:nu]
p = abs(p)
p = p / float(n)
p = p**2

if n % 2 > 0:
    p[1:len(p)] = p[1:len(p)] * 2
else:
    p[1:len(p) - 1] = p[1:len(p) - 1] * 2

fA = numpy.arange(0, nu, 1.0) * (sample_freq / n)
plot.plot(fA / 1000, 10 * numpy.log10(p), color='k')
plot.xlabel('Frequency (kHz)')
plot.ylabel('Power (dB)')
Example #38
def calc_jitter(ui,
                nui,
                pattern_len,
                ideal_xings,
                actual_xings,
                rel_thresh=6,
                num_bins=99,
                zero_mean=True):
    """
    Calculate the jitter in a set of actual zero crossings, given the ideal crossings and unit interval.

    Inputs:

      - ui               : The nominal unit interval.
      - nui              : The number of unit intervals spanned by the input signal.
      - pattern_len      : The number of unit intervals, before input symbol stream repeats.
      - ideal_xings      : The ideal zero crossing locations of the edges.
      - actual_xings     : The actual zero crossing locations of the edges.
      - rel_thresh       : (optional) The threshold for determining periodic jitter spectral components (sigma).
      - num_bins         : (optional) The number of bins to use, when forming histograms.
      - zero_mean        : (optional) Force the mean jitter to zero, when True.

    Outputs:

      - jitter   : The total jitter.
      - t_jitter : The times (taken from 'ideal_xings') corresponding to the returned jitter values.
      - isi      : The peak to peak jitter due to intersymbol interference.
      - dcd      : The peak to peak jitter due to duty cycle distortion.
      - pj       : The peak to peak jitter due to uncorrelated periodic sources.
      - rj       : The standard deviation of the jitter due to uncorrelated unbounded random sources.
      - tie_ind  : The data independent jitter.
      - thresh   : Threshold for determining periodic components.
      - jitter_spectrum  : The spectral magnitude of the total jitter.
      - tie_ind_spectrum : The spectral magnitude of the data independent jitter.
      - spectrum_freqs   : The frequencies corresponding to the spectrum components.
      - hist        : The histogram of the actual jitter.
      - hist_synth  : The histogram of the extrapolated jitter.
      - bin_centers : The bin center values for both histograms.

    """
    def my_hist(x):
        """
        Calculates the probability mass function (PMF) of the input vector,
        enforcing an output range of [-UI/2, +UI/2], sweeping everything in [-UI, -UI/2] into the first bin,
        and everything in [UI/2, UI] into the last bin.
        """
        hist, bin_edges = histogram(
            x, [-ui] +
            [-ui / 2.0 + i * ui / (num_bins - 2)
             for i in range(num_bins - 1)] + [ui])
        bin_centers = ([-ui / 2.0] + [
            mean([bin_edges[i + 1], bin_edges[i + 2]])
            for i in range(len(bin_edges) - 3)
        ] + [ui / 2.0])

        return (array(list(map(float, hist))) / sum(hist), bin_centers)

    # Check inputs.
    if len(ideal_xings) == 0:
        raise ValueError(
            "calc_jitter(): zero length ideal crossings vector received!")
    if len(actual_xings) == 0:
        raise ValueError(
            "calc_jitter(): zero length actual crossings vector received!")

    # Line up first ideal/actual crossings, and count/validate crossings per pattern.
    ideal_xings = array(ideal_xings) - (ideal_xings[0] - ui / 2.0)
    actual_xings = array(actual_xings) - (actual_xings[0] - ui / 2.0)
    xings_per_pattern = where(ideal_xings > (pattern_len * ui))[0][0]
    if xings_per_pattern % 2 or not xings_per_pattern:
        print("xings_per_pattern:", xings_per_pattern)
        print("len(ideal_xings):", len(ideal_xings))
        print("min(ideal_xings):", min(ideal_xings))
        print("max(ideal_xings):", max(ideal_xings))
        raise AssertionError(
            "pybert_util.calc_jitter(): Odd number of (or, no) crossings per pattern detected!"
        )
    num_patterns = nui // pattern_len

    # Assemble the TIE track.
    i = 0
    jitter = []
    t_jitter = []
    skip_next_ideal_xing = False
    for ideal_xing in ideal_xings:
        if skip_next_ideal_xing:
            t_jitter.append(ideal_xing)
            skip_next_ideal_xing = False
            continue
        # Confine our attention to those actual crossings occurring
        # within the interval [-UI/2, +UI/2] centered around the
        # ideal crossing.
        min_t = ideal_xing - ui / 2.0
        max_t = ideal_xing + ui / 2.0
        while i < len(actual_xings) and actual_xings[i] < min_t:
            i += 1
        if i == len(
                actual_xings
        ):  # We've exhausted the list of actual crossings; we're done.
            break
        if actual_xings[
                i] > max_t:  # Means the xing we're looking for didn't occur, in the actual signal.
            jitter.append(3.0 * ui /
                          4.0)  # Pad the jitter w/ alternating +/- 3UI/4.
            jitter.append(-3.0 * ui /
                          4.0)  # (Will get pulled into [-UI/2, UI/2], later.)
            skip_next_ideal_xing = True  # If we missed one, we missed two.
        else:  # Noise may produce several crossings. We find all those
            xings = []  # within the interval [-UI/2, +UI/2] centered
            j = i  # around the ideal crossing, and take the average.
            while j < len(actual_xings) and actual_xings[j] <= max_t:
                xings.append(actual_xings[j])
                j += 1
            tie = mean(xings) - ideal_xing
            jitter.append(tie)
        t_jitter.append(ideal_xing)
    jitter = array(jitter)

    if debug:
        print("mean(jitter):", mean(jitter))
        print("len(jitter):", len(jitter))

    if zero_mean:
        jitter -= mean(jitter)

    # Do the jitter decomposition.
    # - Separate the rising and falling edges, shaped appropriately for averaging over the pattern period.
    tie_risings = jitter.take(list(range(0, len(jitter), 2)))
    tie_fallings = jitter.take(list(range(1, len(jitter), 2)))
    tie_risings.resize(num_patterns * xings_per_pattern // 2)
    tie_fallings.resize(num_patterns * xings_per_pattern // 2)
    tie_risings = reshape(tie_risings, (num_patterns, xings_per_pattern // 2))
    tie_fallings = reshape(tie_fallings,
                           (num_patterns, xings_per_pattern // 2))

    # - Use averaging to remove the uncorrelated components, before calculating data dependent components.
    try:
        tie_risings_ave = tie_risings.mean(axis=0)
        tie_fallings_ave = tie_fallings.mean(axis=0)
        isi = max(tie_risings_ave.ptp(), tie_fallings_ave.ptp())
    except Exception:
        print("xings_per_pattern:", xings_per_pattern)
        print("len(ideal_xings):", len(ideal_xings))
        raise
    isi = min(isi, ui)  # Cap the ISI at the unit interval.
    dcd = abs(mean(tie_risings_ave) - mean(tie_fallings_ave))

    # - Subtract the data dependent jitter from the original TIE track, in order to yield the data independent jitter.
    tie_ave = sum(list(zip(tie_risings_ave, tie_fallings_ave)), ())
    tie_ave = resize(tie_ave, len(jitter))
    tie_ind = jitter - tie_ave

    # - Use spectral analysis to help isolate the periodic components of the data independent jitter.
    # -- Calculate the total jitter spectrum, for display purposes only.
    # --- Make vector uniformly sampled in time, via zero padding where necessary.
    # --- (It's necessary to keep track of those elements in the resultant vector, which aren't paddings; hence, 'valid_ix'.)
    x, valid_ix = make_uniform(t_jitter, jitter, ui, nui)
    y = fft(x)
    jitter_spectrum = abs(y[:len(y) // 2]) / sqrt(
        len(jitter))  # Normalized, in order to make power correct.
    f0 = 1.0 / (ui * nui)
    spectrum_freqs = [i * f0 for i in range(len(y) // 2)]

    # -- Use the data independent jitter spectrum for our calculations.
    tie_ind_uniform, valid_ix = make_uniform(t_jitter, tie_ind, ui, nui)

    # --- Normalized, in order to make power correct, since we grab Rj from the freq. domain.
    # --- (I'm using the length of the vector before zero padding, because zero padding doesn't add energy.)
    # --- (This has the effect of making our final Rj estimate more conservative.)
    y = fft(tie_ind_uniform) / sqrt(len(tie_ind))
    y_mag = abs(y)
    y_mean = moving_average(y_mag, n=len(y_mag) // 10)
    y_var = moving_average((y_mag - y_mean)**2, n=len(y_mag) // 10)
    y_sigma = sqrt(y_var)
    thresh = y_mean + rel_thresh * y_sigma
    y_per = where(y_mag > thresh, y, zeros(
        len(y)))  # Periodic components are those lying above the threshold.
    y_rnd = where(y_mag > thresh, zeros(len(y)),
                  y)  # Random components are those lying below.
    y_rnd = abs(y_rnd)
    rj = sqrt(mean((y_rnd - mean(y_rnd))**2))
    tie_per = real(ifft(y_per)).take(valid_ix) * sqrt(
        len(tie_ind))  # Restoring shape of vector to its original,
    pj = tie_per.ptp()  # non-uniformly sampled state.

    # --- Save the spectrum, for display purposes.
    tie_ind_spectrum = y_mag[:len(y_mag) // 2]

    # - Reassemble the jitter, excluding the Rj.
    # -- Here, we see why it was necessary to keep track of the non-padded elements with 'valid_ix':
    # -- It was so that we could add the average and periodic components back together,
    # -- maintaining correct alignment between them.
    if len(tie_per) > len(tie_ave):
        tie_per = tie_per[:len(tie_ave)]
    if len(tie_per) < len(tie_ave):
        tie_ave = tie_ave[:len(tie_per)]
    jitter_synth = tie_ave + tie_per

    # - Calculate the histogram of original, for comparison.
    hist, bin_centers = my_hist(jitter)

    # - Calculate the histogram of everything, except Rj.
    hist_synth, bin_centers = my_hist(jitter_synth)

    # - Extrapolate the tails by convolving w/ complete Gaussian.
    rv = norm(loc=0.0, scale=rj)
    rj_pdf = rv.pdf(bin_centers)
    rj_pmf = rj_pdf / sum(rj_pdf)
    hist_synth = convolve(hist_synth, rj_pmf)
    tail_len = (len(bin_centers) - 1) // 2
    hist_synth = (
        [sum(hist_synth[:tail_len + 1])] +
        list(hist_synth[tail_len + 1:len(hist_synth) - tail_len - 1]) +
        [sum(hist_synth[len(hist_synth) - tail_len - 1:])])

    return (
        jitter,
        t_jitter,
        isi,
        dcd,
        pj,
        rj,
        tie_ind,
        thresh[:len(thresh) // 2],
        jitter_spectrum,
        tie_ind_spectrum,
        spectrum_freqs,
        hist,
        hist_synth,
        bin_centers,
    )
Example #39
import numpy as np
from IPython.display import Audio
import scipy.io.wavfile
from numpy.fft import fft, ifft, fft2, ifft2, fftshift, ifftshift
from scipy import ndimage, misc
from scipy.signal import gaussian

print('Enter File Name without the extension')
name = input()
name_with_extension = name + '.wav'
Omega, f = scipy.io.wavfile.read(name_with_extension)
F = fft(f.T)

# Some useful values
N = len(f)  # total number of samples
L = N / Omega  # length of sound clip (in seconds)
t = np.arange(0, N) * L / N  # array of time stamps for samples

omega = np.fft.fftshift(np.arange(-N / 2, N / 2)) / L

tau = 1550
mask = abs(omega) < tau
Fe = F * mask

inverted_Fe = ifft(Fe)

output_name = name + '_fixed.wav'
scipy.io.wavfile.write(output_name, Omega, (inverted_Fe.astype(f.dtype).T))
Audio(inverted_Fe, rate=Omega)
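
A brief added note: the mask abs(omega) < tau keeps only components below tau
Hz, i.e. an ideal ("brick-wall") low-pass filter applied in the frequency
domain. Narrowing the cutoff is just a tighter mask, e.g.:

    mask_narrow = abs(omega) < tau / 2
    Fe_narrow = F * mask_narrow
    cleaned_narrow = ifft(Fe_narrow)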
Example #40
def simulator(spectrum,
              isotopomers,
              transitions=[[-0.5, 0.5]],
              nt=90,
              number_of_sidebands=128):

    B0 = spectrum["magnetic_flux_density"]
    spin_frequency = spectrum["rotor_frequency"]
    rotor_angle = spectrum["rotor_angle"]

    frequency_scaling_factor = spectrum["gyromagnetic_ratio"] * B0

    number_of_points = spectrum["number_of_points"]
    spectral_width = spectrum["spectral_width"]
    reference_offset = spectrum["reference_offset"]
    frequency = (np.arange(number_of_points) / number_of_points) - 0.5
    frequency *= spectral_width
    frequency += reference_offset
    increment = frequency[1] - frequency[0]

    if spin_frequency < 1.0e-3:
        spin_frequency = 1.0e9
        rotor_angle = 0.0
        number_of_sidebands = 1

    shift_half_bin = 0.5

    # orientations
    cos_alpha, cos_beta, orientation_amp = polar_coordinates(nt)
    orientation_amp /= np.float64(number_of_sidebands)
    n_orientation = cos_beta.size

    # sideband freq
    vr_freq = fftfreq(number_of_sidebands, d=1.0 / number_of_sidebands)
    vr_freq *= spin_frequency / increment

    # wigner matrix
    wigner_2 = wigner_matrices(2, cos_beta)

    # rotor to lab frame transformation
    lab_vector_2 = rotation_lab(2, rotor_angle)

    # pre phase
    pre_phase = pre_phase_components(number_of_sidebands, spin_frequency)

    # allocate memory for calculations
    R2_out = np.empty((n_orientation, 5), dtype=np.complex128)
    spectrum = np.zeros(number_of_points)

    shape = (n_orientation, number_of_sidebands)
    temp = np.empty(shape, dtype=np.complex128)
    sideband_amplitude = np.empty(shape, dtype=np.float64)
    local_frequency = np.empty(n_orientation, dtype=np.float64)
    freq_offset = np.empty(n_orientation, dtype=np.float64)
    offset = np.empty(number_of_sidebands, dtype=np.float64)

    # start calculating the spectrum for every site in every isotopomer.
    for isotopomer in isotopomers:

        sites = isotopomer["sites"]
        spec = np.zeros(number_of_points)

        for transition in transitions:
            for site in sites:
                iso = site["isotropic_chemical_shift"]
                if iso.unit.physical_type == "dimensionless":
                    iso = iso.value * frequency_scaling_factor
                else:
                    iso = iso.value

                zeta = site["shielding_symmetric"]["anisotropy"]
                if zeta.unit.physical_type == "dimensionless":
                    zeta = zeta.value * frequency_scaling_factor
                else:
                    zeta = zeta.value

                eta = site["shielding_symmetric"]["asymmetry"]

                # Hamiltonian
                # nuclear shielding
                R0, R2 = NS(iso, zeta, eta)
                scale = tf.p(transition[1], transition[0])
                R0 *= scale
                R2 *= scale

                local_frequency_offset = (shift_half_bin +
                                          (R0 - frequency[0]) / increment)

                # rotation from PAS to Rotor frame over all orientations
                R2_out = rotation(2,
                                  R2,
                                  wigner_matrix=wigner_2,
                                  cos_alpha=cos_alpha)
                # rotation from rotor to lab frame over all orientations
                R2_out *= lab_vector_2

                # calculating side-band amplitudes
                temp[:] = fft(exp(dot(R2_out, pre_phase[2:7])), axis=-1)
                sideband_amplitude[:] = temp.real**2 + temp.imag**2
                sideband_amplitude *= orientation_amp[:, np.newaxis]

                # calculating local frequencies
                local_frequency[:] = R2_out[:, 2].real / increment

                # interpolate in-between the frequencies to generate a smooth spectrum.
                offset[:] = vr_freq + local_frequency_offset

                # print("before", default_timer() - start0)
                for j, shift in enumerate(offset):
                    if int(shift) >= 0 and int(shift) <= number_of_points:
                        freq_offset[:] = shift + local_frequency
                        # This is the slowest part of the code.
                        averager(spec, freq_offset, nt, sideband_amplitude[:,
                                                                           j])
                        # np.vectorize(averager(spec, freq_offset,
                        #                       nt, sideband_amplitude[:, j]))

                # print("time for computing site", default_timer() - start0)
        # average over all spins
        spectrum += spec * isotopomer["abundance"]

    return frequency, spectrum
Example #41
        from afsiggen import afSigGen
        a = afSigGen()
        a.start()
        a.level = -30
        a.output = True
        print(d.level_correction)
        cpx = d.get_trace()
        print(d.calc_mean_power())
    if 0:
        d.start_dig()
        I, Q = d.captmem(2000)
        d.get_level_correction()
        print(d.calc_mean_power(I, Q))
        I, Q = d.get_trace(1000)
        print(d.calc_mean_power(I, Q))
        plt.plot(20 * log10(absolute(fft.fftshift(fft.fft(I + 1j * Q)))))
        plt.show()
        d.stop_dig()
        #print get_func("Capture_IQ_GetAbsSampleTime", ses, 0)

        #print get_func("Trigger_Source_Get", ses)

        #print get_func("Capture_PipeliningEnable_Get", ses)

        #do_func('Capture_PipeliningEnable_Set', ses, 1)

        #do_func('Trigger_SwTriggerMode_Set', ses, 1) #set software trigger mode to armed
        #print get_func('Trigger_SwTriggerMode_Get', ses)

        #print get_func('Capture_IQ_TriggerCount_Get', ses)
Example #42
    def fftd(self):
        """Performs FFT on level-corrected data for display."""
        fftd = 20 * log10(absolute(fft.fftshift(fft.fft(self.cpx_lc))))
        return fftd - fftd.max() + self.calc_mean_power()
Example #43
    nobs = 200  #10000
    ar = [1, 0.0]
    ma = [1, 0.0]
    ar2 = np.zeros(nobs)
    ar2[:2] = [1, -0.9]

    uni = np.zeros(nobs)
    uni[0] = 1.
    #arrep = signal.lfilter(ma, ar, ar2)
    #marep = signal.lfilter([1],arrep, uni)
    # same faster:
    arcomb = np.convolve(ar, ar2, mode='same')
    marep = signal.lfilter(ma, arcomb, uni)  #[len(ma):]
    print(marep[:10])
    mafr = fft.fft(marep)

    rvs = np.random.normal(size=nobs)
    datafr = fft.fft(rvs)
    y = fft.ifft(mafr * datafr)
    print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]], rowvar=0))

    arrep = signal.lfilter([1], marep, uni)
    print(arrep[:20])  # roundtrip to ar
    arfr = fft.fft(arrep)
    yfr = fft.fft(y)
    x = fft.ifft(arfr * yfr).real  #imag part is e-15
    # the next two are equal, roundtrip works
    print(x[:5])
    print(rvs[:5])
    print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]], rowvar=0))
Example #44
    kk = 0
    for name in dirnames:
        if os.path.isfile(name):
            title = tvector[kk]
            vdatavals = np.loadtxt(name).view(complex)
            N = len(vdatavals)
            dict[title] = np.append([x], [vdatavals], axis=0)
            kk = kk + 1
    h = h + 1



### STEP 2: Find the sideband values and the carrier wave location

# Perform an FFT of the y values
yhat = fft(y)  # Fourier amplitudes
yhat1 = 1 / N * np.abs(yhat)  # Normalized Fourier amplitudes

# Define some constants/constant vectors
L = x[-1] - x[0] + (x[1] - x[0])
k = NLS.kvec(N)
#sv = np.array([-3,-2,-1,0,1,2,3]) # The sideband vector
sv = []  # the sideband vector
for j in range(len(yhat1)):
    if yhat1[j] > 1e-11:
        sv.append(j)

lll = len(sv)

# Find max Fourier amplitude and location
mt = max(yhat1) # The max amplitude (m)
Example #45
    Given the number of samples and the sampling period, this yields the
    frequency sequence of the curves obtained from the Fourier decomposition.

    2. nf.fft(sequence of original function values)
    The Fourier transform of the original function-value sequence yields an
    array of complex numbers; each complex number's modulus is the amplitude
    and its argument is the initial phase.

    3. nf.ifft()
    The inverse Fourier transform of a complex array yields a synthesized
    array of function values.
"""
import numpy.fft as nf
import numpy as np
import matplotlib.pyplot as mp

x = np.linspace(0, 4 * np.pi, num=1000)
# y = np.zeros(1000)
y = 4 * np.pi * np.sin(x)
for i in range(2, 10001):
    y += 4 / (2 * i - 1) * np.pi * np.sin((2 * i - 1) * x)
mp.subplot(121)
# mp.plot(x, y, label='fang bo')

req = nf.fft(y)
y_ = nf.ifft(req)
mp.plot(x, y, label='fang bo', color='red')

mp.subplot(122)
freq = nf.fftfreq(y_.size, x[1] - x[0])
power = np.abs(req)
mp.plot(freq, power, label='freq', color='red')
mp.legend()
mp.show()
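
A minimal added check of the fft/ifft round trip described in the docstring
above, reusing the arrays from this snippet:

    assert np.allclose(y_, y)  # ifft(fft(y)) recovers the original signal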
Example #46
    diam = 2*eps        # Aperture diameter we are using
    N_diam = Lx/diam     # How many Diameters is our window [-1, 1]
    # fftfreq is very useful: it calculates the frequency array for
    # a given number of samples N spanning a window of size N_diam
    freq = fftshift(fftfreq(n=N, d=N_diam/N))

    # Useless sanity checks
    plt.figure()
    plt.plot(x, y * mask_y)
    plt.show()

    masked = y * mask_y

    pup0 = masked * np.exp(1j * masked)

    f = fftshift(fft(pup0, norm='ortho'))
    psf0 = (np.abs(f))**2
    PEAK = np.max(psf0)
    psf0 /= PEAK

    plt.figure()
    plt.plot(psf0)
    # plt.show()

    # Compare the Nominal PSF to that of a PSF with aberrations
    # in the form of Cosine Waves of a given spatial frequency [cycles / Diameter]
    plt.figure()
    plt.plot(freq, psf0, linestyle='--')        # Nominal PSF for reference
    for fx in np.arange(2, 8, 2):       # [cycles / Diam]

        ee = 0.1
Example #47
def cps(a, b):
    return fft.fft(a) * conjugate(fft.fft(b))  # bdb fft 10%
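
A small added note: cps(a, b) is the cross-power spectrum, so by the
Wiener-Khinchin relation the inverse FFT of cps(a, a) gives the circular
autocorrelation of a. A minimal sketch (assuming numpy is imported as np
alongside the fft and conjugate names the snippet already uses):

    a = np.random.randn(256)
    acorr = fft.ifft(cps(a, a)).real
    # acorr[0] equals np.dot(a, a) up to floating-point error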
Example #48
        temp = temp + abs(spectrum[i]**2)

    return sqrt(2 * temp) / 15000


if __name__ == "__main__":
    from numpy.random import randn
    from numpy.fft import fft, ifft, rfft, irfft
    import numpy

    f = 100
    N = 15000

    t = numpy.linspace(0, 1, N)
    x = 1.414 * numpy.sin(2 * numpy.pi * f * t)
    X = fft(x)

    #    n = 17
    #    x = randn(n)
    #    X = fft(x)
    #    rX = rfft(x)

    rms = numpy.sqrt(numpy.mean(x**2))

    print(halfselfrms_fft(X))
    print(selfrms_fft(X))
    print(rms_flat(x))
    print(rms_flat(ifft(X)))
    print(rms_fft(X))

    # Accurate for odd n:
Example #49
sample_size = 2**7
sample_t = np.linspace(0, 4, sample_size, dtype=np.float64)
sample_y = signal(sample_t) + state.standard_normal(sample_size)
sample_d = 4. / (sample_size - 1)  # Spacing for linspace array
true_signal = signal(sample_t)

fig1, ax1 = plt.subplots()
ax1.plot(sample_t, sample_y, "k.", label="Noisy signal")
ax1.plot(sample_t, true_signal, "k--", label="True signal")

ax1.set_title("Sample signal with noise")
ax1.set_xlabel("Time")
ax1.set_ylabel("Amplitude")
ax1.legend()

spectrum = fft.fft(sample_y)

freq = fft.fftfreq(sample_size, sample_d)
pos_freq_i = np.arange(1, sample_size // 2, dtype=int)

psd = np.abs(spectrum[pos_freq_i])**2 + np.abs(spectrum[-pos_freq_i])**2

fig2, ax2 = plt.subplots()
ax2.plot(freq[pos_freq_i], psd)
ax2.set_title("PSD of the noisy signal")
ax2.set_xlabel("Frequency")
ax2.set_ylabel("Density")

filtered = pos_freq_i[psd < 1e4]

new_spec = np.zeros_like(spectrum)
Example #50
x = ((n - 2) > 0) * exp(-(n - 1) / 4) * (-1)
x[2] = 1
xt = exp(-((t - 2) > 0) * (t - 3) / 5)

fig1 = figure(figsize=(4, 3), num=1)
ax1 = fig1.add_subplot(111)
ml_1, sl_1, bl_1 = ax1.stem(n, x)
plt.setp(ml_1, 'markerfacecolor', 'k', 'markersize', 8, 'marker', 's')
plt.setp(sl_1, 'color', 'b', 'linewidth', 2)
plt.setp(bl_1, 'linewidth', 2)
ax1.set_xlabel(r'$n \; \rightarrow$')
ax1.set_ylabel(r'$x[n] \; \rightarrow$')
fig1.tight_layout()

X = fft(x) / NFFT

#fig1, axes = plt.subplots(nrows=4, ncols=4)

gs33 = gridspec.GridSpec(3, 3)
gs33.update(left=0.15, wspace=0.1, hspace=0.1, right=0.99, top=0.99)

fig2 = figure(figsize=(8, 6), num=2)

bbox_props = dict(boxstyle="Round, pad=0.3", fc="white", ec="k", lw=0.5)
A = 0.11
COS = True
for i in range(3):
    for j in range(3):
        k = 3 * i + j
        ax_i = fig2.add_subplot(gs33[i, j])
Example #51
def fourier_fit(x,
                y,
                n_predict=0,
                x_smooth=None,
                n_pts=n_pts_smooth,
                n_harm=default_fourier_n_harm):
    """
    Creates a Fourier fit of a NumPy array. Also supports extrapolation.
    Credit goes to https://gist.github.com/tartakynov/83f3cd8f44208a1856ce.

    Parameters
    ----------
    x, y: numpy.ndarray
        1D NumPy arrays of the x and y values to fit to.
        Must not contain NaNs.
    n_predict: int
        The number of points to extrapolate.
        The points will be spaced evenly by the mean spacing of values in `x`.
    x_smooth: list-like, optional
        The exact x values to interpolate for. Supersedes `n_pts`.
    n_pts: int, optional
        The number of evenly spaced points spanning the range of `x` to interpolate for.
    n_harm: int
        The number of harmonics to use. A higher value yields a closer fit.

    Returns
    -------
    x_smooth, y_smooth: numpy.ndarray
        The smoothed x and y values of the curve fit.
    """
    if x_smooth is None:
        x_smooth_inds = np.linspace(0, len(x) - 1, n_pts)
        x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
    n_predict_smooth = int((len(x_smooth) / len(x)) * n_predict)
    # These points are evenly spaced for the fourier fit implementation we use.
    # More points are selected than are in `x_smooth` so we can interpolate accurately.
    fourier_mult_pts = 2
    x_smooth_fourier = np.linspace(x_smooth.min(), x_smooth.max(),
                                   fourier_mult_pts * len(x_smooth))
    y_smooth_fourier = np.interp(x_smooth_fourier, x, y)
    n_predict_smooth_fourier = int(
        (len(x_smooth_fourier) / len(x)) * n_predict)

    # Perform the Fourier fit and extrapolation.
    n = y_smooth_fourier.size
    t = np.arange(0, n)
    p = np.polyfit(t, y_smooth_fourier, 1)  # find linear trend in arr
    x_notrend = y_smooth_fourier - p[0] * t  # detrended arr
    x_freqdom = fft.fft(x_notrend)  # detrended arr in frequency domain
    f = fft.fftfreq(n)  # frequencies
    # sort indexes by frequency, lower -> higher
    indexes = list(range(n))
    indexes.sort(key=lambda i: np.absolute(x_freqdom[i]))
    indexes.reverse()
    t = np.arange(0, n + n_predict_smooth_fourier)
    restored_sig = np.zeros(t.size)
    for i in indexes[:1 + n_harm * 2]:
        ampli = np.absolute(x_freqdom[i]) / n  # amplitude
        phase = np.angle(x_freqdom[i])  # phase
        restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)
    y_smooth_fourier = restored_sig + p[0] * t

    # Find the points in `x_smooth_fourier` that are near to points in `x_smooth`
    # and then interpolate the y values to match the new x values.
    x_smooth = x_smooth_fourier[np.searchsorted(x_smooth_fourier, x_smooth)]
    # Ensure `x_smooth` includes the extrapolations.
    mean_x_smooth_space = np.diff(x_smooth).mean()
    x_predict_smooth = np.linspace(
        x_smooth[-1] + mean_x_smooth_space,
        x_smooth[-1] + mean_x_smooth_space * n_predict_smooth,
        n_predict_smooth)
    x_smooth = np.concatenate((x_smooth, x_predict_smooth))
    # Ensure `x_smooth_fourier` includes the extrapolations.
    mean_x_smooth_fourier_space = np.diff(x_smooth).mean()
    x_predict_smooth_fourier = \
        np.linspace(
            x_smooth_fourier[-1] + mean_x_smooth_fourier_space,
            x_smooth_fourier[-1] + mean_x_smooth_fourier_space * n_predict_smooth_fourier,
            n_predict_smooth_fourier)
    x_smooth_fourier = np.concatenate(
        (x_smooth_fourier, x_predict_smooth_fourier))
    y_smooth = np.interp(x_smooth, x_smooth_fourier, y_smooth_fourier)
    return x_smooth, y_smooth
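
A hedged usage sketch (added; assumes the module-level defaults n_pts_smooth
and default_fourier_n_harm referenced by the signature are defined, and that
numpy is imported as np):

    x = np.linspace(0, 10, 50)
    y = np.sin(x) + 0.1 * np.random.randn(50)
    x_s, y_s = fourier_fit(x, y, n_predict=10, n_pts=200, n_harm=8)
    # x_s/y_s trace the smoothed fit plus points extrapolated past x[-1]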
Example #52
def test_filters():
    """Test low-, band-, high-pass, and band-stop filters plus resampling."""
    rng = np.random.RandomState(0)
    sfreq = 100
    sig_len_secs = 15

    a = rng.randn(2, sig_len_secs * sfreq)

    # let's test our catchers
    for fl in ['blah', [0, 1], 1000.5, '10ss', '10']:
        pytest.raises((ValueError, TypeError),
                      filter_data,
                      a,
                      sfreq,
                      4,
                      8,
                      None,
                      fl,
                      1.0,
                      1.0,
                      fir_design='firwin')
    for nj in ['blah', 0.5]:
        pytest.raises(ValueError,
                      filter_data,
                      a,
                      sfreq,
                      4,
                      8,
                      None,
                      1000,
                      1.0,
                      1.0,
                      n_jobs=nj,
                      phase='zero',
                      fir_design='firwin')
    pytest.raises(ValueError,
                  filter_data,
                  a,
                  sfreq,
                  4,
                  8,
                  None,
                  100,
                  1.,
                  1.,
                  fir_window='foo')
    pytest.raises(ValueError,
                  filter_data,
                  a,
                  sfreq,
                  4,
                  8,
                  None,
                  10,
                  1.,
                  1.,
                  fir_design='firwin')  # too short
    # > Nyq/2
    pytest.raises(ValueError,
                  filter_data,
                  a,
                  sfreq,
                  4,
                  sfreq / 2.,
                  None,
                  100,
                  1.0,
                  1.0,
                  fir_design='firwin')
    pytest.raises(ValueError,
                  filter_data,
                  a,
                  sfreq,
                  -1,
                  None,
                  None,
                  100,
                  1.0,
                  1.0,
                  fir_design='firwin')
    # these should work
    create_filter(None, sfreq, None, None)
    create_filter(a, sfreq, None, None, fir_design='firwin')
    create_filter(a, sfreq, None, None, method='iir')

    # check our short-filter warning:
    with pytest.warns(RuntimeWarning, match='attenuation'):
        # Warning for low attenuation
        filter_data(a, sfreq, 1, 8, filter_length=256, fir_design='firwin2')
    with pytest.warns(RuntimeWarning, match='Increase filter_length'):
        # Warning for too short a filter
        filter_data(a, sfreq, 1, 8, filter_length='0.5s', fir_design='firwin2')

    # try new default and old default
    freqs = fftfreq(a.shape[-1], 1. / sfreq)
    A = np.abs(fft(a))
    kwargs = dict(fir_design='firwin')
    for fl in ['auto', '10s', '5000ms', 1024, 1023]:
        bp = filter_data(a, sfreq, 4, 8, None, fl, 1.0, 1.0, **kwargs)
        bs = filter_data(a, sfreq, 8 + 1.0, 4 - 1.0, None, fl, 1.0, 1.0,
                         **kwargs)
        lp = filter_data(a,
                         sfreq,
                         None,
                         8,
                         None,
                         fl,
                         10,
                         1.0,
                         n_jobs=2,
                         **kwargs)
        hp = filter_data(lp, sfreq, 4, None, None, fl, 1.0, 10, **kwargs)
        assert_allclose(hp, bp, rtol=1e-3, atol=2e-3)
        assert_allclose(bp + bs, a, rtol=1e-3, atol=1e-3)
        # Sanity check attenuation
        mask = (freqs > 5.5) & (freqs < 6.5)
        assert_allclose(np.mean(np.abs(fft(bp)[:, mask]) / A[:, mask]),
                        1.,
                        atol=0.02)
        assert_allclose(np.mean(np.abs(fft(bs)[:, mask]) / A[:, mask]),
                        0.,
                        atol=0.2)
        # now the minimum-phase versions
        bp = filter_data(a,
                         sfreq,
                         4,
                         8,
                         None,
                         fl,
                         1.0,
                         1.0,
                         phase='minimum',
                         **kwargs)
        bs = filter_data(a,
                         sfreq,
                         8 + 1.0,
                         4 - 1.0,
                         None,
                         fl,
                         1.0,
                         1.0,
                         phase='minimum',
                         **kwargs)
        assert_allclose(np.mean(np.abs(fft(bp)[:, mask]) / A[:, mask]),
                        1.,
                        atol=0.11)
        assert_allclose(np.mean(np.abs(fft(bs)[:, mask]) / A[:, mask]),
                        0.,
                        atol=0.3)

    # and since these are low-passed, downsampling/upsampling should be close
    n_resamp_ignore = 10
    bp_up_dn = resample(resample(bp, 2, 1, n_jobs=2), 1, 2, n_jobs=2)
    assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
    # note that on systems without CUDA, this line serves as a test for a
    # graceful fallback to n_jobs=1
    bp_up_dn = resample(resample(bp, 2, 1, n_jobs='cuda'), 1, 2, n_jobs='cuda')
    assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
    # test to make sure our resampling matches scipy's
    bp_up_dn = sp_resample(sp_resample(bp,
                                       2 * bp.shape[-1],
                                       axis=-1,
                                       window='boxcar'),
                           bp.shape[-1],
                           window='boxcar',
                           axis=-1)
    assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)

    # make sure we don't alias
    t = np.array(list(range(sfreq * sig_len_secs))) / float(sfreq)
    # make sinusoid close to the Nyquist frequency
    sig = np.sin(2 * np.pi * sfreq / 2.2 * t)
    # signal should disappear with 2x downsampling
    sig_gone = resample(sig, 1, 2)[n_resamp_ignore:-n_resamp_ignore]
    assert_array_almost_equal(np.zeros_like(sig_gone), sig_gone, 2)

    # let's construct some filters
    iir_params = dict(ftype='cheby1', gpass=1, gstop=20, output='ba')
    iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
    # this should be a third order filter
    assert iir_params['a'].size - 1 == 3
    assert iir_params['b'].size - 1 == 3
    iir_params = dict(ftype='butter', order=4, output='ba')
    iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
    assert iir_params['a'].size - 1 == 4
    assert iir_params['b'].size - 1 == 4
    iir_params = dict(ftype='cheby1', gpass=1, gstop=20)
    iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
    # this should be a third order filter, which requires 2 SOS ((2, 6))
    assert iir_params['sos'].shape == (2, 6)
    iir_params = dict(ftype='butter', order=4, output='sos')
    iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
    assert iir_params['sos'].shape == (2, 6)

    # check that picks work for 3d array with one channel and picks=[0]
    a = rng.randn(5 * sfreq, 5 * sfreq)
    b = a[:, None, :]

    a_filt = filter_data(a,
                         sfreq,
                         4,
                         8,
                         None,
                         400,
                         2.0,
                         2.0,
                         fir_design='firwin')
    b_filt = filter_data(b,
                         sfreq,
                         4,
                         8, [0],
                         400,
                         2.0,
                         2.0,
                         fir_design='firwin')

    assert_array_equal(a_filt[:, None, :], b_filt)

    # check for n-dimensional case
    a = rng.randn(2, 2, 2, 2)
    with pytest.warns(RuntimeWarning, match='longer'):
        pytest.raises(ValueError, filter_data, a, sfreq, 4, 8,
                      np.array([0, 1]), 100, 1.0, 1.0)

    # check corner case (#4693)
    want_length = int(round(_length_factors['hamming'] * 1000. / 0.5))
    want_length += (want_length % 2 == 0)
    assert want_length == 6601
    h = create_filter(np.empty(10000),
                      1000.,
                      l_freq=None,
                      h_freq=55.,
                      h_trans_bandwidth=0.5,
                      method='fir',
                      phase='zero-double',
                      fir_design='firwin',
                      verbose=True)
    assert len(h) == 6601
    h = create_filter(np.empty(10000),
                      1000.,
                      l_freq=None,
                      h_freq=55.,
                      h_trans_bandwidth=0.5,
                      method='fir',
                      phase='zero',
                      fir_design='firwin',
                      filter_length='7s',
                      verbose=True)
    assert len(h) == 7001
    h = create_filter(np.empty(10000),
                      1000.,
                      l_freq=None,
                      h_freq=55.,
                      h_trans_bandwidth=0.5,
                      method='fir',
                      phase='zero-double',
                      fir_design='firwin',
                      filter_length='7s',
                      verbose=True)
    assert len(h) == 8193  # next power of two

# set up plot
fig, ax = plt.subplots()
data = np.arange(fft_size // 16)
data[0] = 8000
line, = ax.plot(data)
plt.pause(0.001)

# begin playback
_play_with_simpleaudio(audio)
tstart = time.time()

# update plot
while True:
    # retrieve current sample
    tnow = time.time() - tstart
    current = int(tnow * sample_rate)

    # break if there are not enough samples (end of song)
    if current + fft_size >= len(l_samples):
        break

    # perform fft
    data = abs(fft(l_samples[current:current + fft_size])) / fft_size +\
           abs(fft(r_samples[current:current + fft_size])) / fft_size
    data = np.array(data[:fft_size // 16])

    # plot amplitudes
    line.set_ydata(data)
    fig.canvas.draw()
    fig.canvas.flush_events()


def extract_features(data):
    # Each two_d_data gives us a 128*12 matrix
    features_total = []
    for two_d_data in data:
        two_d_data_transpose = two_d_data.T
        features = []
        # Each row gives us a 128 matrix
        # Add Gyro Jerk
        gyro_jerk_x = np.gradient(two_d_data_transpose[6])
        gyro_jerk_y = np.gradient(two_d_data_transpose[7])
        gyro_jerk_z = np.gradient(two_d_data_transpose[8])
        two_d_data_transpose = np.append(two_d_data_transpose,
                                         gyro_jerk_x.reshape(
                                             1, len(gyro_jerk_x)),
                                         axis=0)
        two_d_data_transpose = np.append(two_d_data_transpose,
                                         gyro_jerk_y.reshape(
                                             1, len(gyro_jerk_y)),
                                         axis=0)
        two_d_data_transpose = np.append(two_d_data_transpose,
                                         gyro_jerk_z.reshape(
                                             1, len(gyro_jerk_z)),
                                         axis=0)
        # Add Acc Jerk
        acc_jerk_x = np.gradient(two_d_data_transpose[9])
        acc_jerk_y = np.gradient(two_d_data_transpose[10])
        acc_jerk_z = np.gradient(two_d_data_transpose[11])
        # rows 15-17: body-acc jerk (numerical derivative of body acceleration)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          acc_jerk_x, acc_jerk_y, acc_jerk_z))
        # Add Acc magnitude (row 18)
        acc_data_magnitude = obtain_magnitude(two_d_data_transpose[9],
                                              two_d_data_transpose[10],
                                              two_d_data_transpose[11])
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          acc_data_magnitude))
        # Add Acc jerk magnitude (row 19)
        acc_data_jerk_magnitude = obtain_magnitude(two_d_data_transpose[15],
                                                   two_d_data_transpose[16],
                                                   two_d_data_transpose[17])
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          acc_data_jerk_magnitude))
        # Add Gyro magnitude (row 20)
        gyro_data_magnitude = obtain_magnitude(two_d_data_transpose[6],
                                               two_d_data_transpose[7],
                                               two_d_data_transpose[8])
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          gyro_data_magnitude))
        # Add Gyro jerk magnitude (row 21)
        gyro_data_jerk_magnitude = obtain_magnitude(two_d_data_transpose[12],
                                                    two_d_data_transpose[13],
                                                    two_d_data_transpose[14])
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          gyro_data_jerk_magnitude))
        # Add Frequency body acceleration (rows 22-24); spectra are normalised
        # by 128, presumably the sliding-window length
        f_body_acc_x = np.abs(fft(two_d_data_transpose[9]) / 128)
        f_body_acc_y = np.abs(fft(two_d_data_transpose[10]) / 128)
        f_body_acc_z = np.abs(fft(two_d_data_transpose[11]) / 128)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          f_body_acc_x, f_body_acc_y,
                                          f_body_acc_z))
        # Add Frequency body jerk acceleration (rows 25-27)
        f_body_jerk_acc_x = np.abs(fft(two_d_data_transpose[15]) / 128)
        f_body_jerk_acc_y = np.abs(fft(two_d_data_transpose[16]) / 128)
        f_body_jerk_acc_z = np.abs(fft(two_d_data_transpose[17]) / 128)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          f_body_jerk_acc_x, f_body_jerk_acc_y,
                                          f_body_jerk_acc_z))
        # Add Frequency body acceleration magnitude (row 28)
        f_body_acc_magnitude = np.abs(fft(two_d_data_transpose[18]) / 128)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          f_body_acc_magnitude))
        # Add Frequency body acceleration jerk magnitude (row 29)
        f_body_acc_jerk_magnitude = np.abs(fft(two_d_data_transpose[19]) / 128)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          f_body_acc_jerk_magnitude))
        # Add Frequency body gyro (rows 30-32)
        f_body_gyro_x = np.abs(fft(two_d_data_transpose[6]) / 128)
        f_body_gyro_y = np.abs(fft(two_d_data_transpose[7]) / 128)
        f_body_gyro_z = np.abs(fft(two_d_data_transpose[8]) / 128)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          f_body_gyro_x, f_body_gyro_y,
                                          f_body_gyro_z))
        # Add Frequency body gyro magnitude (row 33)
        f_body_gyro_mag = np.abs(fft(two_d_data_transpose[20]) / 128)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          f_body_gyro_mag))
        # Add Frequency body gyro jerk magnitude (row 34)
        f_body_gyro_jerk_mag = np.abs(fft(two_d_data_transpose[21]) / 128)
        two_d_data_transpose = np.vstack((two_d_data_transpose,
                                          f_body_gyro_jerk_mag))

        # Start extracting time features
        features.extend(
            extract_features_t_body_acc(two_d_data_transpose[9],
                                        two_d_data_transpose[10],
                                        two_d_data_transpose[11]))

        # features.extend(extract_features_t_body_acc_jerk(two_d_data_transpose[15], two_d_data_transpose[16],
        #                                                  two_d_data_transpose[17]))
        features.extend(
            extract_features_t_body_acc_mag(two_d_data_transpose[18]))
        # features.extend(extract_features_t_body_acc_jerk_mag(two_d_data_transpose[19]))
        # features.extend(extract_features_t_gravity_acc(two_d_data_transpose[3], two_d_data_transpose[4],
        #                                                two_d_data_transpose[5]))
        features.extend(
            extract_features_t_body_gyro(two_d_data_transpose[6],
                                         two_d_data_transpose[7],
                                         two_d_data_transpose[8]))
        # features.extend(extract_features_t_body_gyro_jerk(two_d_data_transpose[12], two_d_data_transpose[13],
        #                                                   two_d_data_transpose[14]))
        features.extend(
            extract_features_t_body_gyro_mag(two_d_data_transpose[20]))
        # features.extend(extract_features_t_body_gyro_jerk_mag(two_d_data_transpose[21]))

        # Start extracting frequency features
        features.extend(
            extract_features_f_body_acc(two_d_data_transpose[22],
                                        two_d_data_transpose[23],
                                        two_d_data_transpose[24]))
        # features.extend(extract_features_f_body_acc_jerk(two_d_data_transpose[25], two_d_data_transpose[26],
        #                                                  two_d_data_transpose[27]))
        features.extend(
            extract_features_f_body_acc_mag(two_d_data_transpose[28]))
        # features.extend(extract_features_f_body_acc_jerk_mag(two_d_data_transpose[29]))

        features.extend(
            extract_features_f_body_gyro(two_d_data_transpose[30],
                                         two_d_data_transpose[31],
                                         two_d_data_transpose[32]))
        features.extend(
            extract_features_f_body_gyro_mag(two_d_data_transpose[33]))
        features.extend(
            extract_features_f_body_gyro_jerk_mag(two_d_data_transpose[34]))
        features_total.append(features)

    return pd.DataFrame(features_total)
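
The helper obtain_magnitude is not shown in this excerpt; from its call sites it presumably computes the per-sample Euclidean norm of a 3-axis signal. A minimal sketch under that assumption:

import numpy as np

def obtain_magnitude(x, y, z):
    # Assumed behaviour: per-sample Euclidean norm of a 3-axis signal,
    # matching how the feature pipeline above calls it.
    return np.sqrt(x**2 + y**2 + z**2)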
Example #55
plt.subplot(212)
plt.plot(t_in_s, measurement)

plt.figure(2)
plt.subplot(2, 1, 2)
powerSpectrum, frequenciesFound, time, imageAxis = plt.specgram(measurement,
                                                                Fs=1000)

plt.ylabel('measurement')

# b,a =butter(btype='low')
# y=lfilter(b,a,measurement)

plt.figure(1)
plt.subplot(212)
X = fft(measurement)
plt.plot(np.abs(X))

# print (X)
# plt.plot(fr,X_m)

# plt.ylabel('measurement')
# 20*np.log(abs(fft_result))

# plt.figure(2)
# plt.subplot(212)
# powerSpectrum,frequenciesFound,time,imageAxis=plt.specgram(s1,Fs=100)
# plt.xlabel("time")
# plt.xlabel("Frequency")
# plt.show()
# plt.figure(3)
# plt.subplot(2,1,2)
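
The snippet above assumes t_in_s and measurement already exist. A minimal hypothetical setup that makes it runnable (the 1 kHz rate matches Fs=1000 passed to specgram):

import numpy as np

fs = 1000.0                               # Hz, matches Fs in plt.specgram
t_in_s = np.arange(0, 2.0, 1.0 / fs)      # 2 s of samples
measurement = np.sin(2 * np.pi * 50 * t_in_s) + 0.5 * np.random.randn(t_in_s.size)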
Example #56
    def transform(self, X, y=None):
        spect = fft(X)
        rows, cols = spect.shape
        rval = np.stack((spect.real, spect.imag), -1).reshape((rows, -1))
        debug('FFTEncoder rval.shape:', rval.shape)
        return rval
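
The stack/reshape step interleaves the real and imaginary part of each FFT bin, so an (n_windows, n_samples) input becomes (n_windows, 2 * n_samples) real-valued features. A small hypothetical check:

import numpy as np
from numpy.fft import fft

X = np.random.randn(10, 64)              # 10 windows, 64 samples each
spect = fft(X)                           # FFT along the last axis
rval = np.stack((spect.real, spect.imag), -1).reshape((10, -1))
print(rval.shape)                        # (10, 128)
print(rval[0, :2])                       # real and imaginary part of bin 0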
Example #57
def main():
    from matplotlib.pyplot import semilogx, plot, show, xlim, ylim, figure, legend, subplot, bar
    from numpy.fft import fft, fftfreq, fftshift, ifft
    from numpy import log10, linspace, interp, angle, array, concatenate, zeros

    N = 2048 * 2 * 2
    fs = float(SAMPLING_RATE)
    Nchannels = 20
    low_freq = 20.

    impulse = zeros(N)
    impulse[N // 2] = 1
    f = 1000.
    #impulse = sin(2*pi*f*arange(0, N/fs, 1./fs))

    #[ERBforward, ERBfeedback] = MakeERBFilters(fs, Nchannels, low_freq)
    #y = ERBFilterBank(ERBforward, ERBfeedback, impulse)

    BandsPerOctave = 3
    Nbands = NOCTAVE * BandsPerOctave

    [B, A, fi, fl, fh] = octave_filters(Nbands, BandsPerOctave)
    y, zfs = octave_filter_bank(B, A, impulse)
    #print "Filter lengths without decimation"
    #for b, a in zip(B, A):
    #	print len(b), len(a)

    response = 20. * log10(abs(fft(y)))
    freqScale = fftfreq(N, 1. / fs)

    figure()
    subplot(211)

    for i in range(0, response.shape[0]):
        semilogx(freqScale[0:N // 2], response[i, 0:N // 2])

    xlim(fs / 2000, fs)
    ylim(-70, 10)

    subplot(212)
    m = 0
    for f in fi:
        p = 10. * log10((y[m]**2).mean())
        m += 1
        semilogx(f, p, 'ko')

    Ndec = 3
    fc = 0.5
    # other possibilities
    #(bdec, adec) = ellip(Ndec, 0.05, 30, fc)
    #print bdec
    #(bdec, adec) = cheby1(Ndec, 0.05, fc)
    #(bdec, adec) = butter(Ndec, fc)
    (bdec, adec) = iirdesign(0.48,
                             0.50,
                             0.05,
                             70,
                             analog=0,
                             ftype='ellip',
                             output='ba')
    #bdec = firwin(30, fc)
    #adec = [1.]

    figure()
    subplot(211)

    response = 20. * log10(abs(fft(impulse)))
    plot(fftshift(freqScale), fftshift(response), label="impulse")

    y = lfilter(bdec, adec, impulse)
    response = 20. * log10(abs(fft(y)))
    plot(fftshift(freqScale), fftshift(response), label="lowpass")

    ydec = y[::2].repeat(2)
    response = 20. * log10(abs(fft(ydec)))
    plot(fftshift(freqScale),
         fftshift(response),
         label="lowpass + dec2 + repeat2")

    ydec2 = interp(list(range(0, len(y))), list(range(0, len(y), 2)), y[::2])
    response = 20. * log10(abs(fft(ydec2)))
    plot(fftshift(freqScale),
         fftshift(response),
         label="lowpass + dec2 + interp2")

    ydec3 = y[::2]
    response = 20. * log10(abs(fft(ydec3)))
    freqScale2 = fftfreq(N // 2, 2. / fs)
    plot(freqScale2, fftshift(response), label="lowpass + dec2")

    legend(loc="lower left")

    subplot(212)
    plot(list(range(0, len(impulse))), impulse, label="impulse")
    plot(list(range(0, len(impulse))), y, label="lowpass")
    plot(list(range(0, len(impulse))), ydec, label="lowpass + dec2 + repeat2")
    plot(list(range(0, len(impulse))), ydec2, label="lowpass + dec2 + interp2")
    plot(list(range(0, len(impulse), 2)), ydec3, label="lowpass + dec2")
    legend()

    [boct, aoct, fi, flow,
     fhigh] = octave_filters_oneoctave(Nbands, BandsPerOctave)
    y, dec, zfs = octave_filter_bank_decimation(bdec, adec, boct, aoct,
                                                impulse)
    #print "Filter lengths with decimation"
    #print len(bdec), len(adec)
    #for b, a in zip(boct, aoct):
    #	print len(b), len(a)

    figure()
    subplot(211)

    for yone, d in zip(y, dec):
        response = 20. * log10(abs(fft(yone)) * d)
        freqScale = fftfreq(N // d, 1. / (fs / d))
        semilogx(freqScale[0:N // (2 * d)], response[0:N // (2 * d)])

    xlim(fs / 2000, fs)
    ylim(-70, 10)

    subplot(212)
    m = 0
    for i in range(0, NOCTAVE):
        for f in fi:
            p = 10. * log10((y[m]**2).mean())
            semilogx(f / dec[m], p, 'ko')
            m += 1

    [boct, aoct, fi, flow,
     fhigh] = octave_filters_oneoctave(Nbands, BandsPerOctave)
    y1, dec, zfs = octave_filter_bank_decimation(bdec, adec, boct, aoct,
                                                 impulse[0:N // 2])
    y2, dec, zfs = octave_filter_bank_decimation(bdec,
                                                 adec,
                                                 boct,
                                                 aoct,
                                                 impulse[N // 2:],
                                                 zis=zfs)

    y = []
    for y1one, y2one in zip(y1, y2):
        y += [concatenate((y1one, y2one))]

    figure()
    subplot(211)

    for yone, d in zip(y, dec):
        response = 20. * log10(abs(fft(yone)) * d)
        freqScale = fftfreq(N // d, 1. / (fs / d))
        semilogx(freqScale[0:N // (2 * d)], response[0:N // (2 * d)])

    xlim(fs / 2000, fs)
    ylim(-70, 10)

    subplot(212)
    m = 0
    for i in range(0, NOCTAVE):
        for f in fi:
            p = 10. * log10((y[m]**2).mean())
            semilogx(f / dec[m], p, 'ko')
            m += 1

    generate_filters_params()

    show()
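
As a side illustration of why the low-pass stage precedes decimation in the code above: plain decimation by 2 folds content above the new Nyquist frequency back into band. A small hypothetical check (rates and tone chosen for illustration only):

import numpy as np
from numpy.fft import fft, fftfreq

fs = 8000.0
n = np.arange(1024)
tone = np.sin(2 * np.pi * 3000.0 * n / fs)   # 3 kHz, above fs/4
dec = tone[::2]                              # new rate 4 kHz, Nyquist 2 kHz
f2 = fftfreq(dec.size, 2.0 / fs)
peak = abs(f2[np.argmax(np.abs(fft(dec)))])
print(peak)                                  # ~1000 Hz: the 3 kHz tone aliased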
Example #58
	def TFanalysis(self, sj, cnds, cnd_header, time_period, tf_name, base_period = None, elec_oi = 'all',factor = None, method = 'hilbert', flip = None, base_type = 'conspec', downsample = 1, min_freq = 5, max_freq = 40, num_frex = 25, cycle_range = (3,12), freq_scaling = 'log'):
		'''
		Time-frequency analysis using either Morlet wavelets or the filter-Hilbert
		method for time-frequency decomposition.

		TODO: add an option to subtract the ERP to get evoked power
		TODO: add an option to match trial numbers across conditions

		Arguments
		- - - - -
		sj (int): subject number
		cnds (list): list of conditions as stored in the behavior file
		cnd_header (str): key in the behavior file that contains condition info
		time_period (tuple | list): time window of interest
		tf_name (str): name of the analysis; used to create a unique file location
		base_period (tuple | list): time window used for baseline correction
		elec_oi (str | list): if not 'all', the analysis is limited to the specified electrodes
		factor (dict): limits the analysis to a subset of trials; key(s) specify column headers
		method (str): whether Hilbert filtering or wavelet convolution is used for
					time-frequency decomposition
		flip (dict): flips a subset of trials. The key specifies the header in beh that
					contains flip info; the list in the dict contains the variables that
					need to be flipped. Note: flipping is done from the right to the left
					hemifield
		base_type (str): whether dB conversion is condition specific ('conspec') or averaged
					across conditions ('conavg'). If 'Z', power is Z-transformed
					(condition specific)
		downsample (int): factor used for downsampling (applied after filtering).
					Default is no downsampling
		min_freq (int): minimum frequency for the TF analysis
		max_freq (int): maximum frequency for the TF analysis
		num_frex (int): number of frequencies in the TF analysis
		cycle_range (tuple): the number of cycles increases over the same number of steps
					used for frequency scaling
		freq_scaling (str): whether frequencies are linearly or logarithmically spaced.
					A logarithmic scale is advised when the main results are expected in
					lower frequency bands, a linear scale when they are expected in
					higher frequency bands

		Returns
		- - - -
		None. The TF matrices (raw and baseline-corrected power) are pickled to disk.
		'''

		# read in data
		eegs, beh = self.selectTFData(self.laplacian, factor)
		times = self.EEG.times
		if elec_oi == 'all':
			picks = mne.pick_types(self.EEG.info, eeg=True, exclude='bads')
			ch_names = list(np.array(self.EEG.ch_names)[picks])
		else:
			ch_names = elec_oi	

		# flip subset of trials (allows for lateralization indices)
		if flip is not None:
			key = list(flip.keys())[0]
			eegs = topoFlip(eegs, beh[key], self.EEG.ch_names, left = flip.get(key))

		# get parameters
		nr_time = eegs.shape[-1]
		nr_chan = eegs.shape[1] if elec_oi == 'all' else len(elec_oi)
		if method == 'wavelet':
			wavelets, frex = self.createMorlet(min_freq = min_freq, max_freq = max_freq, num_frex = num_frex, 
									cycle_range = cycle_range, freq_scaling = freq_scaling, 
									nr_time = nr_time, s_freq = self.EEG.info['sfreq'])
		
		elif method == 'hilbert':
			frex = [(i,i + 4) for i in range(min_freq, max_freq, 2)]
			num_frex = len(frex)	

		if type(base_period) in [tuple,list]:
			base_s, base_e = [np.argmin(abs(times - b)) for b in base_period]
		idx_time = np.where((times >= time_period[0]) * (times <= time_period[1]))[0]  
		idx_2_save = np.array([idx for i, idx in enumerate(idx_time) if i % downsample == 0])

		# initiate dicts
		tf = {'ch_names':ch_names, 'times':times[idx_2_save], 'frex': frex}
		tf_base = {'ch_names':ch_names, 'times':times[idx_2_save], 'frex': frex}
		base = {}
		plot_dict = {}

		# loop over conditions
		for c, cnd in enumerate(cnds):
			print(cnd)
			tf.update({cnd: {}})
			tf_base.update({cnd: {}})
			base.update({cnd: np.zeros((num_frex, nr_chan))})

			if cnd != 'all':
				cnd_idx = np.where(beh[cnd_header] == cnd)[0]
			else:
				cnd_idx = np.arange(beh[cnd_header].size)	

			l_conv = 2**self.nextpow2(nr_time * cnd_idx.size + nr_time - 1)
			raw_conv = np.zeros((cnd_idx.size, num_frex, nr_chan, idx_2_save.size), dtype = complex) 

			# loop over channels
			for idx, ch in enumerate(ch_names[:nr_chan]):
				# find ch_idx
				ch_idx = self.EEG.ch_names.index(ch)

				print('Decomposed {0:.0f}% of channels ({1} out {2} conditions)'.format((float(idx)/nr_chan)*100, c + 1, len(cnds)), end='\r')

				# fft decomposition
				if method == 'wavelet':
					eeg_fft = fft(eegs[cnd_idx,ch_idx].ravel(), l_conv)    # eeg is concatenation of trials after ravel

				# loop over frequencies
				for f in range(num_frex):

					if method == 'wavelet':
						# convolve and get analytic signal (OUTPUT DIFFERS SLIGHTLY FROM MATLAB!!! DOUBLE CHECK)
						m = ifft(eeg_fft * fft(wavelets[f], l_conv), l_conv)
						m = m[:nr_time * cnd_idx.size + nr_time - 1]
						m = np.reshape(m[math.ceil((nr_time-1)/2 - 1):int(-(nr_time-1)/2-1)], 
									  (nr_time, -1), order = 'F').T 
					elif method == 'hilbert': # NEEDS EXTRA CHECK
						X = eegs[cnd_idx,ch_idx].ravel()
						m = self.hilbertMethod(X, frex[f][0], frex[f][1], self.EEG.info['sfreq'])
						m = np.reshape(m, (-1, times.size))	

					# populate
					raw_conv[:,f,idx] = m[:,idx_2_save]
					
					# baseline correction (actual correction is done after condition loop)
					if type(base_period) in [tuple,list]:
						base[cnd][f,idx] = np.mean(abs(m[:,base_s:base_e])**2)

			# update cnd dict with phase values (averaged across trials) and power values
			tf[cnd]['power'] = abs(raw_conv)**2
			tf[cnd]['phase'] = abs(np.mean(np.exp(np.angle(raw_conv) * 1j), axis = 0))

		# baseline normalization
		for cnd in cnds:
			if base_type == 'conspec': #db convert: condition specific baseline
				tf_base[cnd]['base_power'] = 10*np.log10(tf[cnd]['power']/np.repeat(base[cnd][:,:,np.newaxis],idx_2_save.size,axis = 2))
			elif base_type == 'conavg':	
				con_avg = np.mean(np.stack([base[cnd] for cnd in cnds]), axis = 0)
				tf_base[cnd]['base_power'] = 10*np.log10(tf[cnd]['power']/np.repeat(con_avg[:,:,np.newaxis],idx_2_save.size,axis = 2))
			elif base_type == 'Z':
				print('For permutation procedure it is assumed that it is as if all stimuli of interest are presented right')
				tf_base[cnd]['Z_power'], z_info = self.permuted_Z(tf[cnd]['power'],ch_names, num_frex, idx_2_save.size) 
				tf_base.update(dict(z_info = z_info))
			if base_type in ['conspec','conavg']:
				tf_base[cnd]['base_power'] = np.mean(tf_base[cnd]['base_power'], axis = 0)

			# power values can now safely be averaged
			tf[cnd]['power'] = np.mean(tf[cnd]['power'], axis = 0)

		# save TF matrices
		with open(self.FolderTracker(['tf',method,tf_name],'{}-tf.pickle'.format(sj)) ,'wb') as handle:
			pickle.dump(tf, handle)		

		with open(self.FolderTracker(['tf',method,tf_name],'{}-tf_base.pickle'.format(sj)) ,'wb') as handle:
			pickle.dump(tf_base, handle)	
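
For reference, a minimal sketch of how the Morlet wavelets described in the docstring could be constructed. The function name, the linear cycle spacing, and the time axis are assumptions, not the actual createMorlet implementation:

import numpy as np

def create_morlet(min_freq, max_freq, num_frex, cycle_range, freq_scaling,
                  nr_time, s_freq):
    # frequencies spaced linearly or logarithmically, as in the docstring
    if freq_scaling == 'log':
        frex = np.logspace(np.log10(min_freq), np.log10(max_freq), num_frex)
    else:
        frex = np.linspace(min_freq, max_freq, num_frex)
    # the number of cycles increases over the same number of steps (assumed linear)
    n_cycles = np.linspace(cycle_range[0], cycle_range[1], num_frex)
    t = np.arange(-(nr_time // 2), nr_time // 2) / s_freq
    wavelets = np.empty((num_frex, t.size), dtype=complex)
    for i, (f, c) in enumerate(zip(frex, n_cycles)):
        s = c / (2 * np.pi * f)              # Gaussian width in seconds
        wavelets[i] = np.exp(2j * np.pi * f * t - t**2 / (2 * s**2))
    return wavelets, frex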
Example #59
        self.y1 = 0


if __name__ == '__main__':
    import matplotlib
    matplotlib.use('TkAgg')
    import matplotlib.pyplot as plt
    from numpy.fft import fft

    fs = 44.1E3
    lpf = LowPass(500, fs)
    sig = np.zeros(4096)
    out = np.zeros(4096)
    sig[0] = 1.0

    for k in range(len(sig)):
        out[k] = lpf.work(sig[k])

    freqs = np.arange(len(sig)) / float(len(sig)) * fs
    sig_fft = fft(sig)
    out_fft = fft(out)
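    # sig is a unit impulse, so sig_fft is identically 1 and resp is the
    # filter's frequency response; the division keeps this correct for any probe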
    resp = out_fft / sig_fft

    plt.subplot(121)
    plt.plot(freqs, 20.0 * np.log10(np.abs(resp)))

    plt.subplot(122)
    plt.plot(freqs, np.angle(resp) * 180.0 / np.pi)

    plt.show()

def calculo_DTFS(x):
    # DTFS coefficients: the DFT scaled by the signal length
    return fft(x) / len(x)
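
A quick hypothetical check of the scaling: for a pure cosine at bin k0, the DTFS coefficients have magnitude 1/2 at +-k0:

import numpy as np
from numpy.fft import fft

N, k0 = 32, 4
n = np.arange(N)
x = np.cos(2 * np.pi * k0 * n / N)
c = fft(x) / len(x)                  # same scaling as calculo_DTFS
print(np.round(abs(c[k0]), 3), np.round(abs(c[N - k0]), 3))   # 0.5 0.5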