def fft_based(input_signal, filter_coefficients, boundary=0):
    """Apply FFT-based filtering when the signal is too short to be split into windows.

    Params :
        input_signal : the audio signal
        filter_coefficients : coefficients of the chirplet bank
        boundary : manage the bounds of the signal
    Returns :
        audio signal with the fast Fourier transform applied
    """
    num_coeffs = filter_coefficients.size
    half_size = num_coeffs // 2

    if boundary == 0:  # ZERO PADDING
        input_signal = np.lib.pad(input_signal, (half_size, half_size),
                                  'constant', constant_values=0)
        filter_coefficients = np.lib.pad(filter_coefficients,
                                         (0, input_signal.size - num_coeffs),
                                         'constant', constant_values=0)
        newx = ifft(fft(input_signal) * fft(filter_coefficients))
        return newx[num_coeffs - 1:-1]

    elif boundary == 1:  # SYMMETRIC
        input_signal = concatenate([flipud(input_signal[:half_size]),
                                    input_signal,
                                    flipud(input_signal[half_size:])])
        filter_coefficients = np.lib.pad(filter_coefficients,
                                         (0, input_signal.size - num_coeffs),
                                         'constant', constant_values=0)
        newx = ifft(fft(input_signal) * fft(filter_coefficients))
        return newx[num_coeffs - 1:-1]

    else:  # PERIODIC
        return roll(ifft(fft(input_signal) *
                         fft(filter_coefficients, input_signal.size)),
                    -half_size).real
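# Hedged check (added, not part of the original module): the identity fft_based
# relies on is that a pointwise FFT product is a circular convolution. The sketch
# below uses numpy.fft explicitly instead of the bare fft/ifft names assumed above.
import numpy as np

x = np.random.randn(64)
h = np.random.randn(8)
h_padded = np.concatenate([h, np.zeros(x.size - h.size)])

via_fft = np.fft.ifft(np.fft.fft(x) * np.fft.fft(h_padded)).real

# direct circular convolution for comparison
direct = np.array([sum(x[(n - k) % x.size] * h[k] for k in range(h.size))
                   for n in range(x.size)])

assert np.allclose(via_fft, direct)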
def time_fft(data2, samplerate=100., inverse=False, hann=False):
    '''
    IF N_PARAMS() EQ 0 then begin
        print, 'time_fft, data, samplerate=samplerate, inverse=inverse,hann=hann'
        return, -1
    ENDIF
    '''
    data = data2
    if hann:
        w1 = pl.hanning(len(data))
        data = data * w1

    # frequency axis:
    freqs = pl.arange(1 + len(data) // 2) / float(len(data) / 2.0) * samplerate / 2.
    if len(data) % 2 == 0:
        freqs = pl.concatenate((freqs, -freqs[1:(len(freqs) - 1)][::-1]))
    if len(data) % 2 != 0:
        freqs = pl.concatenate((freqs, -freqs[1:len(freqs)][::-1]))

    response = pl.fft(data)
    if inverse:
        response = pl.ifft(data)

    out = {'freq': freqs,
           'real': response.real,
           'im': response.imag,
           'abs': abs(response)}
    return out
def decodefft(finf, data, dropheights=False):
    # output: decoded data with the number of heights reduced
    # two variables are added to the finfo class:
    #   deco_num_hei, deco_hrange
    # data must be arranged:
    #   (channels, heights, times)  (C-style, profs change faster)
    # fft along the entire (n=None) acquired heights (axis=1), stored in data
    num_chan = data.shape[0]
    num_ipps = data.shape[2]
    num_codes = finf.subcode.shape[0]
    num_bauds = finf.subcode.shape[1]
    NSA = finf.num_hei + num_bauds - 1
    uppower = py.ceil(py.log2(NSA))
    extra = int(2**uppower - finf.num_hei)
    NSA = int(2**uppower)
    fft_code = py.fft(finf.subcode, n=NSA, axis=1).conj()
    data = py.fft(data, n=NSA, axis=1)  # n=None: no cropped data or padded zeros
    for ch in range(num_chan):
        for ipp in range(num_ipps):
            code_i = ipp % num_codes
            data[ch, :, ipp] = data[ch, :, ipp] * fft_code[code_i, :]
    data = py.ifft(data, n=NSA, axis=1)  # ifft along the heights
    if dropheights:
        return data[:, :-extra - (num_bauds - 1), :]
    else:
        return data[:, :-extra, :]
def f(t, *args):
    for i, arg in enumerate(args):
        params[free_params[i]] = arg
    tshift = params[-1]
    ideal = fmodel(t, *args)
    irf = cspline1d_eval(self.irf_generator, t - tshift,
                         dx=self.irf_dt, x0=self.irf_t0)
    # imaginary part is negligibly small, so keep only the real part
    convoluted = pylab.real(pylab.ifft(pylab.fft(ideal) * pylab.fft(irf)))
    return convoluted
def fft_smoothing(input_signal, sigma):
    """Smooth the signal in the Fourier domain.

    Params :
        input_signal : audio signal
        sigma : relative to the length of the output signal
    Returns :
        a shorter and smoother signal
    """
    size_signal = input_signal.size

    # shorten the signal
    new_size = int(floor(10.0 * size_signal * sigma))
    half_new_size = new_size // 2

    fftx = fft(input_signal)

    short_fftx = []
    for ele in fftx[:half_new_size]:
        short_fftx.append(ele)
    for ele in fftx[-half_new_size:]:
        short_fftx.append(ele)

    apodization_coefficients = generate_apodization_coeffs(
        half_new_size, sigma, size_signal)

    # apply the apodization coefficients
    short_fftx[:half_new_size] *= apodization_coefficients
    short_fftx[half_new_size:] *= flipud(apodization_coefficients)

    realifftxw = ifft(short_fftx).real
    return realifftxw
def my_transform(x, dir):
    # my_transform - perform either a forward or an inverse FFT with energy conservation.
    # Works on arrays of size (w, w, a, b) along the first two dimensions.
    w = np.shape(x)[0]
    if dir == 1:
        y = np.transpose(pyl.fft(np.transpose(x))) / np.sqrt(w)
    else:
        y = np.transpose(pyl.ifft(np.transpose(x) * np.sqrt(w)))
    return y
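# Hedged sketch (added for illustration): the 1/sqrt(w) scaling above makes the
# transform orthonormal, so a forward/inverse round trip is the identity and the
# l2 norm is preserved. numpy.fft is used directly here because pyl.fft/pyl.ifft
# are no longer exported by recent pylab versions.
import numpy as np

w = 16
x = np.random.randn(w, w)

forward = np.fft.fft(x, axis=0) / np.sqrt(w)         # same scaling as the dir == 1 branch
inverse = np.fft.ifft(forward, axis=0) * np.sqrt(w)  # same scaling as the else branch

assert np.allclose(inverse.real, x)                            # round trip recovers x
assert np.isclose(np.linalg.norm(forward), np.linalg.norm(x))  # energy is conserved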
def fast_fracdiff(x, d):
    T = len(x)
    np2 = int(2**np.ceil(np.log2(2 * T - 1)))
    k = np.arange(1, T)
    b = (1, ) + tuple(np.cumprod((k - d - 1) / k))
    z = (0, ) * (np2 - T)
    z1 = b + z
    z2 = tuple(x) + z
    dx = pl.ifft(pl.fft(z1) * pl.fft(z2))
    return np.real(dx[0:T])
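# Hedged check (added as a usage sketch): the FFT product above is a fast way of
# convolving the series with the fractional-differencing weights
# b[k] = prod_{j<=k} (j - d - 1)/j. The first T terms of a direct np.convolve give
# the same values; numpy.fft is used so the check does not depend on pylab exports.
import numpy as np

x = np.cumsum(np.random.randn(256))   # a random walk
d = 0.4
T = len(x)

k = np.arange(1, T)
b = np.concatenate(([1.0], np.cumprod((k - d - 1) / k)))
direct = np.convolve(x, b)[:T]        # first T terms of the linear convolution

np2 = int(2**np.ceil(np.log2(2 * T - 1)))
via_fft = np.fft.ifft(np.fft.fft(b, np2) * np.fft.fft(x, np2)).real[:T]

assert np.allclose(via_fft, direct)   # same values that fast_fracdiff(x, d) returns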
def FourierDerivative(f):
    """
    This derivative only works for periodic series on a 2*pi-length grid;
    have to figure out how to make it work for any function.
    """
    N = np.size(f)
    n = np.arange(0, N)
    # df: discrete differential operator
    df = complex(0, 1) * py.fftshift(n - N / 2)
    dfdt = py.ifft(df * py.fft(f))
    return py.real(dfdt)
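# Hedged check (added for illustration): on a 2*pi-periodic grid the spectral
# derivative above reproduces the analytic derivative, e.g. d/dt sin(t) = cos(t).
# numpy.fft is used directly because py.fft/py.ifft may not exist in newer pylab.
import numpy as np

N = 128
t = np.arange(N) * 2 * np.pi / N
f = np.sin(t)

n = np.arange(N)
df = 1j * np.fft.fftshift(n - N / 2)       # same operator as in FourierDerivative
dfdt = np.real(np.fft.ifft(df * np.fft.fft(f)))

assert np.allclose(dfdt, np.cos(t))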
def fast_fracdiff(x, cols, d):
    for col in cols:
        T = len(x[col])
        np2 = int(2**np.ceil(np.log2(2 * T - 1)))
        k = np.arange(1, T)
        b = (1, ) + tuple(np.cumprod((k - d - 1) / k))
        z = (0, ) * (np2 - T)
        z1 = b + z
        z2 = tuple(x[col]) + z
        dx = pl.ifft(pl.fft(z1) * pl.fft(z2))
        x[col + "_frac"] = np.real(dx[0:T])
    return x
def _ConvFft(signal, FilterKernel):
    """
    Convolution with fft, a much faster approach;
    works exactly as convolve(x, y).
    """
    ss = numpy.size(signal)
    fs = numpy.size(FilterKernel)
    # pad with zeros until both have size N + M - 1
    signal = numpy.append(signal, numpy.zeros(fs + ss - 1 - ss))
    FilterKernel = numpy.append(FilterKernel, numpy.zeros(fs + ss - 1 - fs))
    signal = pylab.real(pylab.ifft(pylab.fft(signal) * pylab.fft(FilterKernel)))
    return signal[:fs + ss - 1]
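# Hedged check (added): with both arrays zero-padded to length N + M - 1, the FFT
# product above reproduces numpy's direct linear convolution, which is what the
# docstring claims. Written with numpy.fft so it runs without pylab.
import numpy as np

x = np.random.randn(100)
h = np.random.randn(15)
n_out = x.size + h.size - 1

via_fft = np.fft.ifft(np.fft.fft(x, n_out) * np.fft.fft(h, n_out)).real
assert np.allclose(via_fft, np.convolve(x, h))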
def __gauss(sacobj, Tn, alpha):
    """Return envelope and Gaussian-filtered data."""
    import pylab as pl
    data = pl.array(sacobj.data)
    delta = sacobj.delta
    Wn = 1 / float(Tn)
    Nyq = 1 / (2 * delta)
    old_size = data.size
    pad_size = 2**(int(pl.log2(old_size)) + 1)
    data.resize(pad_size)
    spec = pl.fft(data)
    spec.resize(pad_size)
    W = pl.array(pl.linspace(0, Nyq, pad_size))
    Hn = spec * pl.exp(-1 * alpha * ((W - Wn) / Wn)**2)
    Qn = complex(0, 1) * Hn.real - Hn.imag
    hn = pl.ifft(Hn).real
    qn = pl.ifft(Qn).real
    an = pl.sqrt(hn**2 + qn**2)
    an.resize(old_size)
    hn = hn[0:old_size]
    return (an, hn)
def __init__(self, winSize, rate):
    self.twoPiJ = 2.0 * N.pi * complex(0, 1)
    self.winSize = winSize
    self.chunkSize = winSize // 2
    self.hann = self._hanning()
    self.Hann = P.ifft(self.hann)
    self.rate = float(rate)
    self.freqT = rate / winSize
    self.nyquist = rate / 2
    self.binF = N.zeros(self.winSize, N.double)
    self.binF[0:self.winSize] = N.arange(0, rate, self.freqT)
    self.binFW = (self.binF + self.nyquist) % rate - self.nyquist
def InverseFT(shortft, Neven=False, fNyq=False, tOffset=0):
    ftlist = [shortft.s1, shortft.s2, shortft.s3]
    tslist = []
    for ft in ftlist:
        if ft.Offset1 == 0:
            pftdata = ft.data
        else:
            pftdata = numpy.array([0] + list(ft.data))
        if Neven == False:
            nftdata = numpy.conj(numpy.flipud(pftdata))[:-1]
            ftdata = numpy.concatenate((pftdata, nftdata))
        elif (Neven, fNyq) == (True, True):
            nftdata = numpy.conj(numpy.flipud(pftdata))[1:-1]
            ftdata = numpy.concatenate((pftdata, nftdata))
        elif (Neven, fNyq) == (True, False):
            nftdata = numpy.conj(numpy.flipud(pftdata))[:-1]
            ftdata = numpy.concatenate((pftdata, numpy.array([0]), nftdata))
        N = ftdata.shape[0]
        dt = 1. / (N * ft.Cadence1)
        norm = numpy.sqrt(N / (2. * dt))
        tsdata = pylab.ifft(norm * ftdata)
        tsdata = numpy.real(tsdata)
        tslist += [Utilities3.Coarsable(data=tsdata, Offset1=tOffset, Cadence1=dt)]
    tsdict = {}
    tsdict['s1'], tsdict['s2'], tsdict['s3'] = tslist[0], tslist[1], tslist[2]
    return TimeSeries(**tsdict)
def detrend(data, detrend_Kernel_Length=10, sampling_Frequency=100000, channels=8):
    from pylab import fft, ifft, log, conj

    n = len(data[0])
    # FFT length in samples; round to avoid float error in 2**(log(x)/log(2))
    detrend_fft_Length = int(round(2**(log(detrend_Kernel_Length * sampling_Frequency) / log(2))))
    ma = [1.0] * sampling_Frequency
    ma.extend([0.0] * (detrend_fft_Length - sampling_Frequency))
    mafft = fft(ma)
    trend = [0.0] * n
    for nch in range(channels):
        count = 0
        while count + detrend_fft_Length <= len(data[nch]):
            temp = data[nch][count:count + detrend_fft_Length]
            y = fft(temp)
            z = ifft(conj(mafft) * y)
            for cc in range(count, count + (detrend_fft_Length - sampling_Frequency)):
                trend[cc] = z[cc - count].real / sampling_Frequency
            count = count + (detrend_fft_Length - sampling_Frequency)
        for cc in range(len(trend)):
            data[nch][cc] = data[nch][cc] - trend[cc]
def perform_convolution(x, h, bound="sym"):
    """
    perform_convolution - compute convolution with centered filter.

        y = perform_convolution(x, h, bound);

    The filter 'h' is centred at 0 for odd length of the filter,
    and at 1/2 otherwise.

    This works either for 1D or 2D convolution.
    For 2D the matrix has to be square.

    'bound' is either 'per' (periodic extension) or 'sym' (symmetric extension).

    Copyright (c) 2004 Gabriel Peyre
    """
    if bound not in ["sym", "per"]:
        raise Exception('bound should be sym or per')

    if np.ndim(x) == 3 and np.shape(x)[2] < 4:
        # for color images
        y = x
        for i in range(np.shape(x)[2]):
            y[:, :, i] = perform_convolution(x[:, :, i], h, bound)
        return y

    if np.ndim(x) == 3 and np.shape(x)[2] >= 4:
        raise Exception('Not yet implemented for 3D array, use smooth3 instead.')

    n = np.shape(x)
    p = np.shape(h)
    nd = np.ndim(x)

    if nd == 1:
        n = len(x)
        p = len(h)

    if bound == 'sym':
        #################################
        # symmetric boundary conditions #
        raise Exception('Not yet implemented')
    else:
        ################################
        # periodic boundary conditions #
        if p > n:
            raise Exception('h filter should be shorter than x.')
        n = np.asarray(n)
        p = np.asarray(p)
        d = np.floor((p - 1) / 2.)
        if nd == 1:
            d = int(d)
            # wrap the centered filter so the FFT product is a centered convolution
            h = np.concatenate((h[d:], np.zeros(n - p), h[:d]))
            y = np.real(pyl.ifft(pyl.fft(x) * pyl.fft(h)))
        else:
            d = d.astype(int)
            h = np.vstack((h[d[0]:, :],
                           np.vstack((np.zeros([n[0] - p[0], p[1]]), h[:d[0], :]))))
            h = np.hstack((h[:, d[1]:],
                           np.hstack((np.zeros([n[0], n[1] - p[1]]), h[:, :d[1]]))))
            y = np.real(pyl.ifft2(pyl.fft2(x) * pyl.fft2(h)))

    return y
T = nFFT / rate
f = 1000.0
s = Spectral(nFFT, rate)
nFrag = 7
y = N.zeros(nFrag * fragSize, N.double)
X1 = s.freqVectH(f, 0)
D = s.deltaFF(f, T / 2.0)
for i in range(nFrag - 1):
    chunk = N.real(P.ifft(X1))
    y[i * fragSize:i * fragSize + nFFT] += chunk
    X1 = X1 * D * .8
# X2 = s.Hann
# X1 *= s.shiftVec(T/2)

from matplotlib.pyplot import *
plot(y)
show()
def autoCorr(self, timeSeries):
    self.N = len(timeSeries)
    self.nfft = int(2 ** math.ceil(math.log(abs(self.N), 2)))
    self.ACF = p.ifft(p.fft(timeSeries, self.nfft) *
                      p.conjugate(p.fft(timeSeries, self.nfft)))
    self.ACF = list(p.real(self.ACF[:int(math.ceil((self.nfft + 1) / 2.0))]))
    self.plotAutoCorr()
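# Hedged sketch (added for context): the Wiener-Khinchin identity used in autoCorr.
# Padding only to the next power of two, as above, can leave some circular
# wrap-around; padding to at least 2*N - 1, as done here, makes the FFT result
# match the direct linear autocorrelation exactly.
import numpy as np

x = np.random.randn(200)
nfft = int(2 ** np.ceil(np.log2(2 * len(x) - 1)))

spec = np.fft.fft(x, nfft)
acf = np.fft.ifft(spec * np.conjugate(spec)).real[:len(x)]

direct = np.correlate(x, x, mode='full')[len(x) - 1:]   # non-negative lags
assert np.allclose(acf, direct)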
def unweight2d(self, data, ov, bw):
    print(" Applying UNWEIGHT with ov=[" + str(ov[0]) + "," + str(ov[1]) + "] ... ")
    n0 = data.shape[0]
    n1 = data.shape[1]

    # Percentage -> Pixels, secure having integers
    # ------------------------------------------------------------
    bw0 = int(np.floor((bw[0] * n0 / 100.) / 2) * 2)  # even integer
    bw1 = int(np.floor((bw[1] * n1 / 100.) / 2) * 2)  # even integer
    ov0 = int(np.floor(ov[0]))
    ov1 = int(np.floor(ov[1]))
    # ------------------------------------------------------------

    spec0 = py.fft2(data)
    spec0 = np.roll(spec0, data.shape[0] // 2, axis=0)
    spec0 = np.roll(spec0, data.shape[1] // 2, axis=1)

    # Hamming at processed bandwidth
    # -----------------------------------------------
    if 1:
        t0 = np.arange(bw0) / float(bw0)
        t1 = np.arange(bw1) / float(bw1)
        hamming0 = 0.54 - 0.46 * np.cos(2 * np.pi * t0)
        hamming1 = 0.54 - 0.46 * np.cos(2 * np.pi * t1)
        unham0 = np.zeros(n0)
        unham1 = np.zeros(n1)
        unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2] = hamming0
        unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2] = hamming1
    # -----------------------------------------------

    spec0_profile0 = np.abs(spec0).mean(axis=1)
    maxv0 = 0.95 * np.max(np.abs(spec0_profile0))
    spec0_profile1 = np.abs(spec0).mean(axis=0)
    maxv1 = 0.95 * np.max(np.abs(spec0_profile1))

    # Remove doppler shift and range spectrum shift
    # ------------------------------------------------------------------
    corr0 = np.abs(py.ifft(py.fft(np.abs(spec0_profile0)) *
                           np.conj(py.fft(np.abs(unham0)))))
    corr1 = np.abs(py.ifft(py.fft(np.abs(spec0_profile1)) *
                           np.conj(py.fft(np.abs(unham1)))))
    peak0 = np.where(abs(corr0) == np.max(abs(corr0)))
    off0 = n0 - peak0[0]
    peak1 = np.where(abs(corr1) == np.max(abs(corr1)))
    off1 = n1 - peak1[0]
    spec0 = np.roll(spec0, off0, axis=0)
    spec0 = np.roll(spec0, off1, axis=1)
    spec0_profile0 = np.abs(spec0).mean(axis=1)
    maxv0 = 0.95 * np.max(np.abs(spec0_profile0))
    spec0_profile1 = np.abs(spec0).mean(axis=0)
    maxv1 = 0.95 * np.max(np.abs(spec0_profile1))
    # ------------------------------------------------------------------

    # Replace Unhamming filter by profile filter
    # ------------------------------------------------------------------
    if 1:
        unham0 = self.smooth(spec0_profile0 / maxv0, window_len=11)
        unham1 = self.smooth(spec0_profile1 / maxv1, window_len=11)
    # ------------------------------------------------------------------

    # Show profiles
    # ------------------------------------------------
    show_plots = False
    if show_plots:
        plt.plot(spec0_profile0, 'k-', lw=1, color='blue')
        plt.show()
        plt.plot(spec0_profile1, 'k-', lw=1, color='red')
        plt.show()
    # ------------------------------------------------

    # Compare profiles to hamming filter
    # ------------------------------------------------------------------
    if show_plots:
        plt.plot(spec0_profile0, 'k-', lw=1, color='blue')
        plt.plot(self.smooth(spec0_profile0, window_len=21), 'k-', lw=1, color='green')
        plt.plot(maxv0 * unham0, 'k--', lw=1, color='red')
        plt.show()

        plt.plot(spec0_profile1, 'k-', lw=1, color='blue')
        plt.plot(self.smooth(spec0_profile1, window_len=21), 'k-', lw=1, color='green')
        plt.plot(maxv1 * unham1, 'k--', lw=1, color='red')
        plt.show()

        plt.plot(spec0_profile0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2], 'k-', lw=1, color='blue')
        plt.plot(maxv0 * unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2], 'k-', lw=1, color='red')
        plt.show()

        plt.plot(spec0_profile1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2], 'k-', lw=1, color='blue')
        plt.plot(maxv1 * unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2], 'k-', lw=1, color='red')
        plt.show()

        plt.plot(spec0_profile0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2] /
                 (maxv0 * unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2]),
                 'k-', lw=1, color='blue')
        plt.show()

        plt.plot(spec0_profile1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2] /
                 (maxv1 * unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2]),
                 'k-', lw=1, color='blue')
        plt.show()
    # ------------------------------------------------------------------

    # Unhamming
    # ------------------------------------------------------------------
    #print " mean ..."+str(np.mean(abs(spec0)))
    #print " Unhamming ..."
    for k in range(0, n1):
        spec0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2, k] /= \
            unham0[n0 // 2 - bw0 // 2:n0 // 2 + bw0 // 2]   # range (y)
    for k in range(0, n0):
        spec0[k, n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2] /= \
            unham1[n1 // 2 - bw1 // 2:n1 // 2 + bw1 // 2]   # azimuth (x)
    #print " Unhamming done."
    #print " mean ..."+str(np.mean(abs(spec0)))
    # ------------------------------------------------------------------

    # Show profiles
    # ------------------------------------------------
    if show_plots:
        abs_spec0 = np.abs(spec0)
        spec0_profile0 = abs_spec0.sum(axis=1)
        spec0_profile1 = abs_spec0.sum(axis=0)
        plt.plot(spec0_profile0, 'k-', lw=1, color='blue')
        plt.show()
        plt.plot(spec0_profile1, 'k-', lw=1, color='red')
        plt.show()
    # ------------------------------------------------

    spec0 = np.roll(-spec0, data.shape[0] // 2, axis=0)
    spec0 = np.roll(-spec0, data.shape[1] // 2, axis=1)

    # Zero padding
    # ------------------------------------------------------------------
    n0 = spec0.shape[0]
    n1 = spec0.shape[1]
    zeros0 = np.zeros((bw0 * (ov0 - 1), n1), float) + \
        1j * np.zeros((bw0 * (ov0 - 1), n1), float)
    spec1 = np.concatenate((spec0[0:bw0 // 2, :], zeros0, spec0[-bw0 // 2:, :]),
                           axis=0) * ov0
    n0 = spec1.shape[0]
    n1 = spec1.shape[1]
    zeros1 = np.zeros((n0, bw1 * (ov1 - 1)), float) + \
        1j * np.zeros((n0, bw1 * (ov1 - 1)), float)
    spec2 = np.concatenate((spec1[:, 0:bw1 // 2], zeros1, spec1[:, -bw1 // 2:]),
                           axis=1) * ov1
    # ------------------------------------------------------------------

    # Show zero padding results
    # ------------------------------------------------------------------
    '''
    plt.imshow(np.abs(spec0), origin='lower', interpolation='none', cmap=plt.cm.BuGn)
    plt.show()
    plt.imshow(np.abs(spec1), origin='lower', interpolation='none', cmap=plt.cm.BuGn)
    plt.show()
    plt.imshow(np.abs(spec2), origin='lower', interpolation='none', cmap=plt.cm.BuGn)
    plt.show()
    '''
    # ------------------------------------------------------------------

    data = py.ifft2(spec2)

    print(" Applying UNWEIGHT with ov=[" + str(ov[0]) + "," + str(ov[1]) + "] done. ")
    return data
for i in range(winSize):
    R[i] = N.exp(j * N.pi * i)

yChunk = s.tVect()
y = N.zeros(samples, N.float)
player = player.Player()
nChunks = int(samples / chunkSize)
w = s.hanning()
W = P.fft(w)
P.plot(abs(W))
P.show()
for i in range(nChunks - 1):
    i1 = i * chunkSize
    i2 = i1 + winSize
    z = N.double(P.ifft(X1))
    y[i1:i2] += z * w
    X1 *= R
player.play(y)
import pylab
from math import pi, sin

from conv import conv, noise, plot

N = 128   # number of taps
ff = 20   # frequency of filter
f1 = 10   # frequency of first sine
f2 = 50   # frequency of second sine
t = 8     # number of taps in filter

# create FIR mask
mask = [1.0] * ff + [0.0] * (N - ff)

# create FIR template
temp = abs(pylab.ifft(mask))

# truncate, mirror template
filt = [temp[i] for i in range(8, 0, -1)]
filt += [temp[i] for i in range(0, 9, +1)]

# use it
x = [(sin(2.0 * pi * f1 * i / N) + sin(2.0 * pi * f2 * i / N)) for i in range(N)]
y = conv(x, filt)[:N]

# FFT before & after
F = abs(pylab.fft(x))
G = abs(pylab.fft(y))

# plot all
pylab.close(5)
pylab.figure(5)
def perform_convolution(x, h, bound="sym"):
    """
    perform_convolution - compute convolution with centered filter.

        y = perform_convolution(x, h, bound);

    The filter 'h' is centred at 0 for odd length of the filter,
    and at 1/2 otherwise.

    This works either for 1D or 2D convolution.
    For 2D the matrix has to be square.

    'bound' is either 'per' (periodic extension) or 'sym' (symmetric extension).

    Copyright (c) 2004 Gabriel Peyre
    """
    if bound not in ["sym", "per"]:
        raise Exception('bound should be sym or per')

    if np.ndim(x) == 3 and np.shape(x)[2] < 4:
        # for color images
        y = x
        for i in range(np.shape(x)[2]):
            y[:, :, i] = perform_convolution(x[:, :, i], h, bound)
        return y

    if np.ndim(x) == 3 and np.shape(x)[2] >= 4:
        raise Exception('Not yet implemented for 3D array, use smooth3 instead.')

    n = np.shape(x)
    p = np.shape(h)
    nd = np.ndim(x)

    if nd == 1:
        n = len(x)
        p = len(h)

    if bound == 'sym':
        #################################
        # symmetric boundary conditions #
        d1 = np.asarray(p).astype(int) // 2   # padding before
        d2 = p - d1 - 1                       # padding after

        if nd == 1:
            ############################### 1D ###############################
            nx = len(x)
            xx = np.concatenate((x[d1:-1:-1], x, x[nx - 1:nx - d2 - 1:-1]))
            y = signal.convolve(xx, h)
            y = y[p:nx - p - 1]
        elif nd == 2:
            ############################### 2D ###############################
            # double symmetry
            nx, ny = np.shape(x)
            xx = x
            xx = np.vstack((xx[d1[0]:-1:-1, :], xx, xx[nx - 1:nx - d2[0] - 1:-1, :]))
            xx = np.hstack((xx[:, d1[1]:-1:-1], xx, xx[:, ny - 1:ny - d2[1] - 1:-1]))
            y = signal.convolve2d(xx, h, mode="same")
            y = y[(2 * d1[0]):(2 * d1[0] + n[0] + 1),
                  (2 * d1[1]):(2 * d1[1] + n[1] + 1)]
    else:
        ################################
        # periodic boundary conditions #
        if p > n:
            raise Exception('h filter should be shorter than x.')
        n = np.asarray(n)
        p = np.asarray(p)
        d = np.floor((p - 1) / 2.)
        if nd == 1:
            d = int(d)
            # wrap the centered filter so the FFT product is a centered convolution
            h = np.concatenate((h[d:], np.zeros(n - p), h[:d]))
            y = np.real(pyl.ifft(pyl.fft(x) * pyl.fft(h)))
        else:
            h = np.vstack((h[int(d[0]):, :],
                           np.vstack((np.zeros([n[0] - p[0], p[1]]), h[:int(d[0]), :]))))
            h = np.hstack((h[:, int(d[1]):],
                           np.hstack((np.zeros([n[0], n[1] - p[1]]), h[:, :int(d[1])]))))
            y = np.real(pyl.ifft2(pyl.fft2(x) * pyl.fft2(h)))

    return y
def build_fft(input_signal, filter_coefficients, threshold_windows=6, boundary=0):
    """Apply the fast Fourier transform filtering window by window.

    Params :
        input_signal : the audio signal
        filter_coefficients : coefficients of the chirplet bank
        threshold_windows : used to compute the size of the windows
        boundary : manage the bounds of the signal
    Returns :
        fast Fourier transform applied by windows to the audio signal
    """
    num_coeffs = filter_coefficients.size
    # print(n, boundary, M)
    half_size = num_coeffs // 2
    signal_size = input_signal.size
    # power of 2 to apply the fast Fourier transform
    windows_size = 2**ceil(log2(num_coeffs * (threshold_windows + 1)))
    number_of_windows = floor(signal_size // windows_size)

    if number_of_windows == 0:
        return fft_based(input_signal, filter_coefficients, boundary)

    windowed_fft = empty_like(input_signal)
    # pad with 0 to have a size which is a power of 2
    windows_size = int(windows_size)
    zeropadding = np.lib.pad(filter_coefficients, (0, windows_size - num_coeffs),
                             'constant', constant_values=0)
    h_fft = fft(zeropadding)

    # to browse the whole signal
    current_pos = 0

    # apply fft to a part of the signal; this part has a size which is a power of 2
    if boundary == 0:  # ZERO PADDING
        # window is half padded with zeros since it's focused on the first half
        window = input_signal[current_pos:current_pos + windows_size - half_size]
        zeropaddedwindow = np.lib.pad(window, (len(h_fft) - len(window), 0),
                                      'constant', constant_values=0)
        x_fft = fft(zeropaddedwindow)
    elif boundary == 1:  # SYMMETRIC
        window = concatenate([flipud(input_signal[:half_size]),
                              input_signal[current_pos:current_pos + windows_size - half_size]])
        x_fft = fft(window)
    else:
        x_fft = fft(input_signal[:windows_size])

    windowed_fft[:windows_size - num_coeffs] = (ifft(x_fft * h_fft)[num_coeffs - 1:-1]).real
    current_pos += windows_size - num_coeffs - half_size

    # apply the fast Fourier transform to each window
    while current_pos + windows_size - half_size <= signal_size:
        x_fft = fft(input_signal[current_pos - half_size:
                                 current_pos + windows_size - half_size])
        # suppress the complex warning: keep only the real part
        windowed_fft[current_pos:current_pos + windows_size - num_coeffs] = \
            (ifft(x_fft * h_fft)[num_coeffs - 1:-1]).real
        current_pos += windows_size - num_coeffs
    # print(countloop)

    # apply the fast Fourier transform to the rest of the signal
    if windows_size - (signal_size - current_pos + half_size) < half_size:
        window = input_signal[current_pos - half_size:]
        zeropaddedwindow = np.lib.pad(
            window, (0, int(windows_size - (signal_size - current_pos + half_size))),
            'constant', constant_values=0)
        x_fft = fft(zeropaddedwindow)
        windowed_fft[current_pos:] = roll(ifft(x_fft * h_fft), half_size)[
            half_size:half_size + windowed_fft.size - current_pos].real
        windowed_fft[-half_size:] = convolve(input_signal[-num_coeffs:],
                                             filter_coefficients, 'same')[-half_size:]
    else:
        window = input_signal[current_pos - half_size:]
        zeropaddedwindow = np.lib.pad(
            window, (0, int(windows_size - (signal_size - current_pos + half_size))),
            'constant', constant_values=0)
        x_fft = fft(zeropaddedwindow)
        windowed_fft[current_pos:] = ifft(x_fft * h_fft)[
            num_coeffs - 1:num_coeffs + windowed_fft.size - current_pos - 1].real

    return windowed_fft
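# Hedged usage sketch (added): build_fft assumes the module-level names used above
# (np, fft, ifft, ceil, log2, floor, empty_like, concatenate, flipud, roll, convolve).
# A minimal call with a short Gaussian kernel standing in for one chirplet filter;
# the output has the same length as the input signal.
import numpy as np

signal = np.random.randn(44100)                        # one second of noise at 44.1 kHz
kernel = np.exp(-0.5 * np.linspace(-3, 3, 129) ** 2)   # stand-in filter, not a real chirplet
kernel /= kernel.sum()

filtered = build_fft(signal, kernel, threshold_windows=6, boundary=0)
assert filtered.shape == signal.shape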
def create_interf(freq, resp, band=[], plt=False, sav=False, res=1.0, two=False):
    '''
    print, "create_interf, freq, resp, tc=tc, plt=plt,sav=sav, band=band, res=res, bw=bw, two=two"
    print, "freq, resp - put in your own frequency and response data"
    print, "/plt plots the band pass and interferrogram"
    print, "/sav saves the interferrogram to a text file"
    print, "band = band, res=res, /bw - use these to create freq/resp band with create_band"
    print, "/two - put 2 interferrograms in a row"
    return, 0
    '''
    # def where_closest(value, array):
    #     abs_diff = pl.array(abs(array - value))
    #     wh = pl.where(abs_diff == min(abs_diff))[0]
    #     wh_closest = abs_diff[wh]
    #     return wh_closest

    if len(band) != 0:
        r = create_band(band, res)
        freq = r['Freq']
        resp = r['resp']
        if band[1] == band[0]:
            resp = pl.zeros(len(freq))
            k = where_closest(band[0], freq)
            resp[k[0]] = 1.0

    # if freq[0] != 0 then return with a warning!
    if freq[0] != 0:
        print('Must go down to zero frequency')
        return -1

    # Let's be careful with these n's.
    # From DFanning: let N=8, N/2 = 4, then F_ns for 0,1,2,3,4, -3,-2,-1  NOTE: no -4!
    n = pl.arange(len(freq) / 2. + 1)
    x = 30 * n / (max(freq) - min(freq))   # 30 to go from GHz to icm

    intf = pl.ifft(resp)

    x2 = pl.concatenate((x, -(x[1:len(x) - 2])[::-1]))  # should this be -2 or -1?
    if len(freq) % 2 == 1:
        x2 = pl.concatenate((x, -(x[1:len(x) - 1])[::-1]))

    # plot, freq, resp
    # oplot, freq, FFT(intf), color=1

    if two:
        x2 = pl.concatenate((x2, x2 + 2 * max(x2)))
        intf = pl.concatenate((intf, intf))

    q = x2.argsort()
    x2 = x2[q]
    intf = (intf[q]).real

    result = {'x': x2, 'intf': intf}

    if plt:
        if len(band) != 0:
            rtemp = create_band(band, res, plt=True)
        pl.plot(freq, resp)
        pl.title('Band')
        pl.figure()
        pl.plot(x2, intf.real)
        pl.xlabel('Optical Delay (cm)')
        pl.ylabel('Response')
        pl.title('Interferrogram')

    # if sav:
    #     openw, 1, sav
    #     x0 = result.x(0:n_elements(x2)-1)
    #     outp = [[x0], [real_part(result.intf)], [imaginary(result.intf)]]
    #     printf, 1, transpose(outp)
    #     close, 1

    return result