def fourier_transform_and_reconstruct(image, detrend=False, window=False,
                                      ffunc=None):
    """
    Take the Fourier transform of an image, alter it, and reconstruct the image.

    :param image: data
    :type image: :py:class:`numpy.ndarray`
    :param ffunc: function that alters the FFT matrix
    :type ffunc: func
    :return: modified image data
    :rtype: :py:class:`numpy.ndarray`
    """
    if window:
        # hamming() takes a length, not a shape tuple; build a 2-D window
        # as the outer product of two 1-D Hamming windows
        w = np.outer(signal.hamming(image.shape[0]),
                     signal.hamming(image.shape[1]))
    else:
        w = np.ones_like(image)

    if detrend:
        f = fftpack.fft(w * signal.detrend(image))
    else:
        f = fftpack.fft(w * image)

    # alter the fft
    if ffunc is not None:
        f = ffunc(f)

    # Reconstruct with the inverse transform. The original code applied the
    # forward FFT a second time and compensated with np.fliplr(), which is
    # what shifted the origin by one pixel: fft(fft(x)) returns the reversed
    # signal with index 0 fixed, and fliplr() reverses around a different
    # centre. Using ifft() avoids the shift entirely.
    result = np.real(fftpack.ifft(f))

    # threshold against the mean, returning a boolean mask
    return result > result.mean()
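# A minimal usage sketch for the function above, assuming numpy, scipy.signal
# and scipy.fftpack are imported as in the function body; suppress_high_freq
# is a hypothetical example of an ffunc, not part of the original code.
import numpy as np
from scipy import signal, fftpack

def suppress_high_freq(f):
    # hypothetical ffunc: zero out the middle (high-frequency) part of each
    # row's spectrum
    f = f.copy()
    quarter = f.shape[1] // 4
    f[:, quarter:-quarter] = 0
    return f

image = np.random.rand(64, 64)
mask = fourier_transform_and_reconstruct(image, window=True,
                                         ffunc=suppress_high_freq)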
def compute(self):
    sig_array = self.get_input("Signals")
    # If there is no input on the Samples port, use the number of samples
    # in an array row for the number of FFT points.
    if self.has_input("Samples"):
        pts = self.get_input("Samples")
    else:
        try:
            pts = sig_array.get_shape()[1]
        except IndexError:
            pts = sig_array.get_shape()[0]
    sh = sig_array.get_shape()
    if len(sh) < 2:
        shp = (1, sh[0])
        sig_array.reshape(shp)
    (num_sigs, num_samps) = sig_array.get_shape()
    phasors = fftpack.fft(sig_array.get_row_range(0, 0), pts)
    out_ar = phasors
    for i in range(1, num_sigs):
        phasors = fftpack.fft(sig_array.get_row_range(i, i), pts)
        out_ar = numpy.vstack([out_ar, phasors])
    out = NDArray()
    out.set_array(out_ar)
    self.set_output("FFT Output", out)
def xcorrf(trace1, trace2, shift=None):
    """
    Cross-correlation of numpy arrays data1 and data2 in the frequency domain.

    :param shift: maximum lag in samples (required); the result has
                  2 * shift + 1 values, with zero lag at index `shift`
    """
    data1 = trace1.data
    data2 = trace2.data
    complex_result = data1.dtype == complex or data2.dtype == complex
    N1 = len(data1)
    N2 = len(data2)

    data1 = data1.astype("float64")
    data2 = data2.astype("float64")

    # always use a 2**n-sized FFT and perform the xcorr
    size = max(2 * shift + 1, (N1 + N2) // 2 + shift)
    nfft = nextpow2(size)
    IN1 = fft(data1, nfft)
    IN1 *= conjugate(fft(data2, nfft))
    ret = ifft(IN1)
    del IN1

    if not complex_result:
        ret = ret.real
    # shift data so that time lag 0 lands at index 'shift'
    ret = roll(ret, -(N1 - N2) // 2 + shift)[:2 * shift + 1]
    return copy(ret)
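# Both xcorrf above and xcorrnorm further below rely on a nextpow2 helper
# that is not shown; a minimal sketch, assuming it should return the smallest
# power of two not less than its argument (the usual FFT-length convention).
import numpy as np

def nextpow2(n):
    """Smallest power of two >= n."""
    return int(2 ** np.ceil(np.log2(n)))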
def _find_mp(self, counter, template, primer, t_len, p_len, mismatches):
    """Find all occurrences of a primer sequence in both strands of a
    template sequence with at most k mismatches. Multiprocessing version."""
    # integer division: these quantities are used as indices and strides
    slice_size = t_len // self._optimal_slices(t_len, p_len) + p_len + 1
    slice_stride = slice_size - p_len
    chunk_size = self._calculate_chunk_size(slice_size, p_len)
    chunk_stride = chunk_size - p_len
    p_maps = self._map_pattern(str(primer.master_sequence.seq), chunk_size)
    p_fft = (fft(p_maps[0]), fft(p_maps[1]))
    fwd_seq = str(template.seq)
    rev_seq = reverse_complement(fwd_seq)
    correction = np.ndarray(chunk_stride)
    correction.fill(p_len / 3.0)
    # start find_in_slice jobs
    counter.set_work(t_len // slice_stride + 1)
    pos = 0
    work = self.Work(counter=counter)
    while pos < t_len and not self.aborted():
        front = min(t_len, pos + slice_size)
        queue = self._Queue()
        job = self._Process(
            target=self._find_in_slice,
            args=(
                queue,
                self._abort_event,
                len(work),
                pos,
                front,
                fwd_seq,
                rev_seq,
                p_fft,
                correction,
                slice_stride,
                chunk_size,
                chunk_stride,
            ),
        )
        job.daemon = True
        job.start()
        work.add_job(job, queue)
        pos += slice_stride
    work.start_jobs()
    # if the scores arrays are allocated beforehand, the memory
    # is returned upon deletion
    scores_len = slice_stride * len(work)
    scores = [np.zeros(scores_len), np.zeros(scores_len)]

    def assembler(out, scores):
        if out:
            scores[0][out[0]:out[0] + slice_stride] = out[1]
            scores[1][out[0]:out[0] + slice_stride] = out[2]

    work.assemble(assembler, scores)
    if not work.wait() or self.aborted():
        return None
    # compute match indices
    matches = max(1, p_len - mismatches) - 0.5
    fwd_matches = np.where(scores[0][:t_len - p_len + 1] >= matches)[0]
    rev_matches = np.where(scores[1][:t_len - p_len + 1] >= matches)[0]
    return fwd_seq, rev_seq, primer, t_len, p_len, fwd_matches, rev_matches
def get_significant_frequencies(self, data, total_freq=15, max_addition=10,
                                max_iteration=1000):
    N = len(data)
    xf = np.linspace(0.0, N, N)

    # initialise significant frequencies by taking frequency 0
    spectrum_data = fft(data)
    [amp, phs, freq] = self.get_highest_n_freq(spectrum_data, 1)[0]
    frequencies = [[amp, phs, freq]]
    freq_occur_counter = {freq: 1}
    exit_counter = 0
    # data -= amp

    while len(frequencies) < total_freq:
        spectrum_data = fft(data)
        # recreate the wave of the highest frequency
        [amp, phs, freq] = self.get_highest_n_freq(spectrum_data, 2)[1]
        if freq == 0:
            [amp, phs, freq] = self.get_highest_n_freq(spectrum_data, 2)[0]
        wave = amp * np.cos((freq * 2.0 * np.pi * xf) + phs)
        # subtract the wave from the data
        data -= wave

        # zip() returns an iterator in Python 3, so materialise it before indexing
        if freq not in list(zip(*frequencies))[2]:
            frequencies.append([amp, phs, freq])
            freq_occur_counter.update({freq: 1})
        else:
            for ind, val in enumerate(frequencies):
                if (frequencies[ind][2] == freq
                        and freq_occur_counter[freq] < max_addition):
                    frequencies[ind][0] += amp
                    frequencies[ind][1] = ((freq_occur_counter[freq]
                                            * frequencies[ind][1])
                                           + phs) / (freq_occur_counter[freq] + 1)
                    freq_occur_counter[freq] += 1

        exit_counter += 1
        if exit_counter >= max_iteration:
            break

    return frequencies, data
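# The method above calls a get_highest_n_freq helper that is not shown.
# A minimal sketch under stated assumptions: the frequency is returned as the
# raw FFT bin index and the amplitude is normalised as 2|X_k|/N for a
# real-valued signal; the caller's conventions may differ.
def get_highest_n_freq(self, spectrum, n):
    """Return [[amplitude, phase, frequency-bin], ...] for the n largest
    spectral magnitudes, sorted from largest to smallest."""
    N = len(spectrum)
    order = np.argsort(np.abs(spectrum))[::-1][:n]
    return [[2.0 * np.abs(spectrum[k]) / N, np.angle(spectrum[k]), k]
            for k in order]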
def MoyalPropagation(W):
    """
    Propagate the Wigner function W by the Moyal equation.

    This function is used to verify that the obtained Wigner functions
    are steady-state solutions of the Moyal equation.
    """
    # make a copy
    W = np.copy(W)

    dt = 0.005  # time increment
    TIterSteps = 2000

    # pre-calculate the exponents
    expIV = np.exp(-1j * dt * (V(X - 0.5 * Theta) - V(X + 0.5 * Theta)))
    expIK = np.exp(-1j * dt * (K(P + 0.5 * Lambda) - K(P - 0.5 * Lambda)))

    for _ in range(TIterSteps):

        # p x  ->  theta x
        W = fftpack.fft(W, axis=0, overwrite_x=True)
        W *= expIV
        # theta x  ->  p x
        W = fftpack.ifft(W, axis=0, overwrite_x=True)

        # p x  ->  p lambda
        W = fftpack.fft(W, axis=1, overwrite_x=True)
        W *= expIK
        # p lambda  ->  p x
        W = fftpack.ifft(W, axis=1, overwrite_x=True)

        # normalization
        W /= W.real.sum() * dX * dP

    return fftpack.fftshift(W.real)
def bench_random(self):
    from numpy.fft import fft as numpy_fft
    print()
    print('                 Fast Fourier Transform')
    print('=================================================')
    print('      |    real input     |   complex input    ')
    print('-------------------------------------------------')
    print(' size |  scipy  |  numpy  |  scipy  |  numpy ')
    print('-------------------------------------------------')
    for size, repeat in [(100, 7000), (1000, 2000),
                         (256, 10000),
                         (512, 10000),
                         (1024, 1000),
                         (2048, 1000),
                         (2048 * 2, 500),
                         (2048 * 4, 500),
                         ]:
        print('%5s' % size, end=' ')
        sys.stdout.flush()

        for x in [random([size]).astype(double),
                  random([size]).astype(cdouble) +
                  random([size]).astype(cdouble) * 1j
                  ]:
            if size > 500:
                y = fft(x)
            else:
                y = direct_dft(x)
            assert_array_almost_equal(fft(x), y)
            print('|%8.2f' % measure('fft(x)', repeat), end=' ')
            sys.stdout.flush()

            assert_array_almost_equal(numpy_fft(x), y)
            print('|%8.2f' % measure('numpy_fft(x)', repeat), end=' ')
            sys.stdout.flush()

        print(' (secs for %s calls)' % (repeat))
        sys.stdout.flush()
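# The benchmark above (and test_definition below) uses a direct_dft reference
# that is not shown; a minimal sketch, assuming the usual O(N^2) definition
# X_k = sum_n x_n exp(-2*pi*i*k*n/N).
import numpy as np

def direct_dft(x):
    """Slow O(N^2) reference DFT."""
    x = np.asarray(x, dtype=complex)
    N = len(x)
    n = np.arange(N)
    return np.exp(-2j * np.pi * np.outer(n, n) / N) @ x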
def convolve_scalogram(ana, wf, sampling_rate, optimize_fft):
    n = wf.shape[0]
    sig = ana.magnitude
    ana_sr = ana.sampling_rate.rescale('Hz').magnitude
    if optimize_fft:
        sig = sig - sig.mean()  # remove the mean before padding
        nfft = int(2 ** np.ceil(np.log(sig.size) / np.log(2)))
        sig = np.r_[sig, np.zeros(nfft - sig.size)]  # pad signal with zeros to a power-of-2 length
        sig = resample(sig, int(sig.size * sampling_rate / ana_sr))  # resample in the time domain
        sigf = fftpack.fft(sig, n)  # compute the FFT with a power-of-2 length
    else:
        sigf = fftpack.fft(sig)

    # subsampling in the FFT domain (note the scaling factor)
    factor = (sampling_rate / ana.sampling_rate).simplified.magnitude
    x = (n - 1) // 2
    if np.mod(n, 2) == 0:
        sigf = np.concatenate([sigf[0:x + 2], sigf[-x:]]) * factor
    else:
        sigf = np.concatenate([sigf[0:x + 1], sigf[-x:]]) * factor

    # windowing ???
    # win = fftpack.ifftshift(np.hamming(n))
    # sigf *= win

    # convolve (multiplication in Fourier space)
    wt_tmp = fftpack.ifft(sigf[:, np.newaxis] * wf, axis=0)
    # and shift
    wt = fftpack.fftshift(wt_tmp, axes=[0])
    return wt
def test5():
    global L0, N
    L = deepcopy(L0)
    rho = zeros(N, 'double')
    rho[0] = 1.
    rho[N // 2] = 1.
    print(rho)
    print(fft(rho))
    rho = fftshift(fft(rho))
    print("fft(rho) =", rho)
    L = fft(L).T
    L = fft(L).T
    L = fftshift(L)
    # print(L)
    x = linalg.solve(L, rho)
    print("x =", x)
    # x[abs(x) < 0.001] = 0
    x = ifftshift(ifft(x)).real * N
    print("ifft(x) =", x)
    F = []
    for i in range(len(x) - 1):
        F.append(x[i + 1] - x[i])
    print("F =", F)
    print("--------------------------------")
def l0_gradient_minimization_1d(I, lmd, beta_max, beta_rate=2.0,
                                max_iter=30, return_history=False):
    S = np.array(I).ravel()

    # prepare FFT
    F_I = fft(S)
    F_denom = np.abs(psf2otf([-1, 1], S.shape[0]))**2.0

    # optimization
    S_history = [S]
    beta = lmd * 2.0
    hp = np.zeros_like(S)
    for i in range(max_iter):
        # with S, solve for hp in Eq. (12)
        hp = circulant_dx(S, 1)
        mask = hp**2.0 < lmd / beta
        hp[mask] = 0.0

        # with hp, solve for S in Eq. (8)
        S = np.real(ifft((F_I + beta * fft(circulant_dx(hp, -1)))
                         / (1.0 + beta * F_denom)))

        # iteration step
        if return_history:
            S_history.append(np.array(S))
        beta *= beta_rate
        if beta > beta_max:
            break

    if return_history:
        return S_history
    return S
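# The L0 minimization above uses two helpers that are not shown. Minimal
# sketches under stated assumptions: psf2otf need only produce the transfer
# function of the [-1, 1] difference kernel (only its magnitude is used, so
# no circular centring is applied), and circulant_dx(x, 1) is the circular
# forward difference while circulant_dx(x, -1) is its adjoint, as required
# by the normal equation in Eq. (8).
import numpy as np
from scipy.fftpack import fft

def psf2otf(psf, n):
    """FFT of a small kernel zero-padded to length n."""
    buf = np.zeros(n)
    buf[:len(psf)] = psf
    return fft(buf)

def circulant_dx(x, sign):
    """Circular difference operator: +1 = forward diff, -1 = its adjoint."""
    if sign > 0:
        return np.roll(x, -1) - x   # (Dx)_i = x_{i+1} - x_i
    return np.roll(x, 1) - x        # (D^T x)_i = x_{i-1} - x_i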
def KramersKronigFFT(ImX_A):
    '''
    Hilbert transform used to calculate the real part of a function
    from its imaginary part.
    Uses a piecewise-cubic interpolated integral kernel of the Hilbert transform.
    Use only if len(ImX_A) = 2**m - 1; uses fft from scipy.fftpack.
    '''
    X_A = sp.copy(ImX_A)
    N = int(len(X_A))
    # be careful with the data type, otherwise it fails for large N
    if N > 3e6:
        A = sp.arange(3, N + 1, dtype='float64')
    else:
        A = sp.arange(3, N + 1)
    X1 = 4.0 * sp.log(1.5)
    X2 = 10.0 * sp.log(4.0 / 3.0) - 6.0 * sp.log(1.5)
    # filling the kernel
    if N > 3e6:
        Kernel_A = sp.zeros(N - 2, dtype='float64')
    else:
        Kernel_A = sp.zeros(N - 2)
    Kernel_A = (1 - A**2) * ((A - 2) * sp.arctanh(1.0 / (1 - 2 * A))
                             + (A + 2) * sp.arctanh(1.0 / (1 + 2 * A))) \
        + ((A**3 - 6 * A**2 + 11 * A - 6) * sp.arctanh(1.0 / (3 - 2 * A))
           + (A + 3) * (A**2 + 3 * A + 2) * sp.arctanh(1.0 / (2 * A + 3))) / 3.0
    Kernel_A = sp.concatenate([-sp.flipud(Kernel_A),
                               sp.array([-X2, -X1, 0.0, X1, X2]),
                               Kernel_A]) / sp.pi
    # zero-padding the functions for fft
    ImXExt_A = sp.concatenate([X_A[int((N - 1) / 2):],
                               sp.zeros(N + 2),
                               X_A[:int((N - 1) / 2)]])
    KernelExt_A = sp.concatenate([Kernel_A[N:],
                                  sp.zeros(1),
                                  Kernel_A[:N]])
    # performing the fft
    ftReXExt_A = -fft(ImXExt_A) * fft(KernelExt_A)
    ReXExt_A = sp.real(ifft(ftReXExt_A))
    ReX_A = sp.concatenate([ReXExt_A[int((3 * N + 3) / 2 + 1):],
                            ReXExt_A[:int((N - 1) / 2 + 1)]])
    return ReX_A
def run(self):
    if self.filter_on == False:
        f = lambda x: random.random() + self.amp * np.sin(x)
        x = np.linspace(0, 10)
        # map() returns an iterator in Python 3, so build the samples once
        # and reuse them (the original mapped twice, drawing new random
        # offsets for the emitted samples)
        y = [f(xi) for xi in x]
        fft_feature = fft.fft(y)
        ifft_feature = fft.ifft(fft_feature)
        self.newSample.emit(y)
        self.newSamplefft.emit(list(abs(fft_feature)))
        self.newSampleifft.emit(list(ifft_feature))
    elif self.filter_on == True:
        f = lambda x: random.random() + self.amp * np.sin(x)
        x = np.linspace(0, 10)
        y = [f(xi) for xi in x]
        fft_feature = fft.fft(y)
        mean = np.average(abs(fft_feature))
        fft_feature_filter = fft_feature
        for i in range(len(fft_feature)):
            if abs(fft_feature[i]) >= mean:
                fft_feature_filter[i] = abs(fft_feature[i])
            else:
                fft_feature_filter[i] = 0
        ifft_feature = fft.ifft(fft_feature_filter)
        self.newSample.emit(y)
        self.newSamplefft.emit(list(fft_feature_filter))
        self.newSampleifft.emit(list(ifft_feature))
        self.filter_on = False
    else:
        pass
def xcorrnorm(tr1, tr2, pad=True):
    """
    Compute normalized cross correlation of two traces
    maxcor, maxlag, maxdt, cc, lags, tlags = xcorrnorm(tr1, tr2)

    INPUTS
    tr1 - obspy trace 1
    tr2 - obspy trace 2
    NOT IMPLEMENTED YET
    freqmin, freqmax - optional, restrict frequency range to [freqmin, freqmax]
    lags - lags to compute; if None, will compute all lags and find max

    OUTPUTS
    maxcor - value of maximum correlation
    maxlag - lag of maximum correlation (in samples) - this is the number of
             samples to shift tr2 so it lines up with tr1
    maxdt - time lag of max lag, in seconds
    cc - cross correlation value at each shift
    lags - lag at each cc value, in samples
    tlags - lag at each cc value, in seconds

    TODO
    add option to only compute certain lags
    add option to output entire cc function
    """
    from scipy.fftpack import fft, ifft

    if tr1.stats.sampling_rate != tr2.stats.sampling_rate:
        raise RuntimeError('tr1 and tr2 have different sampling rates')
    # make sure data is float
    dat1 = tr1.data * 1.
    dat2 = tr2.data * 1.
    if len(tr1) != len(tr2):
        if pad is True:
            print('tr1 and tr2 have different lengths, padding with zeros')
            if len(dat1) > len(dat2):
                dat2 = np.lib.pad(dat2, (0, len(dat1) - len(dat2)),
                                  'constant', constant_values=(0., 0.))
            else:
                dat1 = np.lib.pad(dat1, (0, len(dat2) - len(dat1)),
                                  'constant', constant_values=(0., 0.))
        else:
            raise RuntimeError('tr1 and tr2 are different lengths, '
                               'set pad=True if you want to proceed')
    # pad data to double the number of samples to avoid wrap-around, and pad
    # further to the next power of 2 for the fft
    n2 = nextpow2(len(dat1))
    FFT1 = fft(dat1, n2)
    norm1 = np.sqrt(np.real(ifft(FFT1 * np.conj(FFT1), n2)))
    FFT2 = fft(dat2, n2)
    norm2 = np.sqrt(np.real(ifft(FFT2 * np.conj(FFT2), n2)))
    cctemp = np.real(ifft(FFT1 * np.conj(FFT2), n2))
    cc = cctemp / (norm1[0] * norm2[0])
    M = len(FFT1)
    # integer shift for np.roll (a float raises in Python 3)
    lags = np.roll(np.linspace(-M / 2 + 1, M / 2, M, endpoint=True),
                   M // 2 + 1).astype(int)
    indx = np.argmax(cc)
    maxcor = cc[indx]
    maxlag = lags[indx]
    maxdt = 1. / tr1.stats.sampling_rate * maxlag
    tlags = 1. / tr1.stats.sampling_rate * lags
    return maxcor, maxlag, maxdt, cc, lags, tlags
def fcglt(A):  # Modal Coefficients to Lobatto Nodal
    """
    Fast Chebyshev-Gauss-Lobatto transformation from Chebyshev expansion
    coefficients (modal) to point space values (nodal). If
    I=numpy.identity(n), then T=chebyshev.fcglt(I) will be the Chebyshev
    Vandermonde matrix on the Lobatto nodes
    """
    size = A.shape
    m = size[0]
    k = m - 2 - np.arange(m - 2)

    if len(size) == 2:  # multiple vectors
        V = np.vstack((2 * A[0, :], A[1:m - 1, :], 2 * A[m - 1, :], A[k, :]))
        F = fft(V, n=None, axis=0)
        B = 0.5 * F[0:m, :]
    else:  # single vector
        V = np.hstack((2 * A[0], A[1:m - 1], 2 * A[m - 1], A[k]))
        F = fft(V, n=None)
        B = 0.5 * F[0:m]

    if A.dtype != 'complex':
        return np.real(B)
    else:
        return B
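# A quick sanity check of the identity stated in the docstring; a sketch
# assuming numpy and fft are in scope as above. Column k of the Vandermonde
# matrix should hold T_k(x_j) = cos(k * theta_j) on the Lobatto nodes
# x_j = cos(theta_j), theta_j = pi * j / (n - 1).
import numpy as np

n = 5
T = fcglt(np.identity(n))
theta = np.pi * np.arange(n) / (n - 1)
for kk in range(n):
    assert np.allclose(T[:, kk], np.cos(kk * theta))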
def fft(self, normalize=False):
    """
    params:
        `normalize`: if True, the data will be normalized before being
                     passed to the fft. Default is False.
    """
    coefs = []
    data = self.audio_data.data
    if normalize:
        if self.audio_data.dtype in [_np.uint8, _np.uint16, _np.uint32]:
            data = 2. * data / 2**(8 * self.audio_data.BIT_WIDTH) - 1
        elif self.audio_data.dtype in [_np.int8, _np.int16, _np.int32]:
            # signed samples already straddle zero, so scale by half the
            # range without shifting (assumes BIT_WIDTH is in bytes; the
            # original scaled by 2**(4*BIT_WIDTH) and shifted, which maps
            # signed data outside [-1, 1))
            data = data / 2.**(8 * self.audio_data.BIT_WIDTH - 1)
        else:
            print("[Warning] Unrecognized dtype detected.")

    if self.audio_data.CHANNELS == 1:  # mono audio data
        # take only the coefs for positive freqs since the data is real-valued
        N = len(self.audio_data.data)
        c1 = _fftpack.fft(data)[:N // 2]
        coefs.append(c1)
    else:  # stereo audio data
        N = len(self.audio_data.data[0])
        c1 = _fftpack.fft(data[0])[:N // 2]
        coefs.append(c1)
        c2 = _fftpack.fft(data[1])[:N // 2]
        coefs.append(c2)

    self.__cached_fft = coefs
    return coefs
def fitTrace(self, kwidth=10, porder=3, cwidth=30, pad=False):
    sh = self.sh
    xr1 = (0, sh[1])
    xrs = [xr1]
    polys = []

    for xr in xrs:
        xindex = np.arange(xr[0], xr[1])
        kernel = np.median(
            self.image[int(sh[0] / 2 - kwidth):int(sh[0] / 2 + kwidth), xindex], 0)

        centroids = []
        totals = []
        for i in np.arange(sh[0]):
            row = self.image[i, xindex]
            row_med = np.median(row)
            total = np.abs((row - row_med).sum())
            # cross-correlate each row against the kernel via the FFT
            cc = fp.ifft(fp.fft(kernel) * np.conj(fp.fft(row - row_med)))
            cc_sh = fp.fftshift(cc)
            centroid = (helpers.calc_centroid(cc_sh, cwidth=cwidth).real
                        - xindex.shape[0] / 2.)
            centroids.append(centroid)
            totals.append(total)

        centroids = np.array(centroids)
        yindex = np.arange(sh[0])
        gsubs = np.where(~np.isnan(centroids))
        centroids[gsubs] = median_filter(centroids[gsubs], size=20)
        coeffs = np.polyfit(yindex[gsubs], centroids[gsubs], porder)
        poly = np.poly1d(coeffs)
        polys.append(poly)

    return xrs, polys
def CQT_fast(x, fs, bins, fmin, fmax, M):
    threshold = 0.0054  # for the Hamming window
    K = int(bins * np.ceil(np.log2(fmax / fmin)))
    Q = 1 / (2**(1 / bins) - 1)
    nfft = np.int32(nearestPow2(np.ceil(Q * fs / fmin)))
    # np.complex is deprecated; the builtin complex is equivalent here
    tempKernel = np.zeros(nfft, dtype=complex)
    specKernel = np.zeros(nfft, dtype=complex)
    sparKernel = []

    # create the sparse kernel
    for k in range(K - 1, -1, -1):
        fk = (2**(k / bins)) * fmin
        N = np.int32(np.round((Q * fs) / fk))
        tempKernel[:N] = hamming(N) / N * np.exp(
            -2 * np.pi * 1j * Q * np.arange(N) / N)
        specKernel = fft(tempKernel)
        specKernel[np.where(np.abs(specKernel) <= threshold)] = 0
        if k == K - 1:
            sparKernel = specKernel
        else:
            sparKernel = np.vstack((specKernel, sparKernel))

    sparKernel = np.transpose(np.conjugate(sparKernel)) / nfft
    ft = fft(x, nfft)
    cqt = np.dot(ft, sparKernel)
    ft = fft(x, nfft * (2**M))
    # calculate the harmonic power spectrum
    # harm_pow = HPS(ft, M)
    # cqt = np.dot(harm_pow, sparKernel)
    return cqt
def make_audio_analysis_plots(infile, prefix='temp', make_plots=True,
                              do_fft=True, fft_sum=None):
    ''' create frequency plot '''
    import numpy as np
    from scipy import fftpack
    from scipy.io import wavfile
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as pl

    if not os.path.exists(infile):
        return -1
    try:
        rate, data = wavfile.read(infile)
    except ValueError:
        print('error reading wav file')
        return -1
    dt_ = 1. / rate
    time_ = dt_ * data.shape[0]
    tvec = np.arange(0, time_, dt_)
    sig0 = data[:, 0]
    sig1 = data[:, 1]
    if not tvec.shape == sig0.shape == sig1.shape:
        return -1
    if not do_fft:
        fft_sum_ = float(np.sum(np.abs(sig0)))
        if hasattr(fft_sum, 'value'):
            fft_sum.value = fft_sum_
        return fft_sum_
    if make_plots:
        pl.clf()
        pl.plot(tvec, sig0)
        pl.plot(tvec, sig1)
        xtickarray = range(0, 12, 2)
        pl.xticks(xtickarray, ['%d s' % x for x in xtickarray])
        pl.savefig('%s/%s_time.png' % (HOMEDIR, prefix))
        pl.clf()
    samp_freq0 = fftpack.fftfreq(sig0.size, d=dt_)
    sig_fft0 = fftpack.fft(sig0)
    samp_freq1 = fftpack.fftfreq(sig1.size, d=dt_)
    sig_fft1 = fftpack.fft(sig1)
    if make_plots:
        pl.clf()
        pl.plot(np.log(np.abs(samp_freq0) + 1e-9), np.abs(sig_fft0))
        pl.plot(np.log(np.abs(samp_freq1) + 1e-9), np.abs(sig_fft1))
        pl.xlim(np.log(10), np.log(40e3))
        xtickarray = np.log(np.array([20, 1e2, 3e2, 1e3, 3e3, 10e3, 30e3]))
        pl.xticks(xtickarray,
                  ['20Hz', '100Hz', '300Hz', '1kHz', '3kHz', '10kHz', '30kHz'])
        pl.savefig('%s/%s_fft.png' % (HOMEDIR, prefix))
        pl.clf()
        run_command('mv %s/%s_time.png %s/%s_fft.png %s/public_html/videos/'
                    % (HOMEDIR, prefix, HOMEDIR, prefix, HOMEDIR))
    fft_sum_ = float(np.sum(np.abs(sig_fft0)))
    if hasattr(fft_sum, 'value'):
        fft_sum.value = fft_sum_
    return fft_sum_
def convolve(self, sigma, is_charge=False):
    """Convolve the 0/bulk potential difference using a Gaussian kernel."""
    f = interp1d(self.a, self.delta_data_avg, "cubic")
    n = self.a.size
    x = np.linspace(self.a.min(), self.a.max(), n * 2)
    y = f(x)
    g = lambda p, s: 1.0 / np.sqrt(2 * np.pi) / s * np.exp(-0.5 * p**2 / s**2)
    p = np.linspace(-1, 1, n * 2)
    gk = fft(fftshift(g(p, sigma)))
    yk = fft(y)
    conv = ifft(yk * gk) / n

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel(r'$z$ (bohr)')
    if is_charge:
        # the input cube files are charge densities
        y *= self.V
        conv *= self.V
    else:
        # the inputs are potentials
        y *= self.Ha
        conv *= self.Ha
        ax.set_ylabel('Electrostatic potential (eV)')
    ax.plot(x, y, 'o-', x, conv, '-',
            x, self.q * np.ones(n * 2) * (1 - 1. / self.eps), '--')
    ax.autoscale(axis='x', tight=True)
    plt.show()
def test_phase_randomize():
    from brainiak.utils.utils import phase_randomize
    import numpy as np
    from scipy.fftpack import fft
    import math
    from scipy.stats import pearsonr

    # generate auto-correlated signals
    nv = 2
    T = 100
    ns = 3
    D = np.zeros((nv, T, ns))
    for v in range(nv):
        for s in range(ns):
            D[v, :, s] = np.sin(np.linspace(0, math.pi * 5 * (v + 1), T)) + \
                np.sin(np.linspace(0, math.pi * 6 * (s + 1), T))

    freq = fft(D, axis=1)
    D_pr = phase_randomize(D)
    freq_pr = fft(D_pr, axis=1)
    p_corr = pearsonr(np.angle(freq).flatten(),
                      np.angle(freq_pr).flatten())[0]

    assert np.isclose(abs(freq), abs(freq_pr)).all(), \
        "Amplitude spectrum not preserved under phase randomization"
    assert abs(p_corr) < 0.03, \
        "Phases still correlated after randomization"
def test_definition(self):
    x = [1, 2, 3, 4 + 1j, 1, 2, 3, 4 + 2j]
    y = fft(x)
    y1 = direct_dft(x)
    assert_array_almost_equal(y, y1)
    x = [1, 2, 3, 4 + 0j, 5]
    assert_array_almost_equal(fft(x), direct_dft(x))
def ssfu(up, dt, dz, nz, alpha, betap, gamma, maxiter=4, tol=1e-5):
    '''
    Very simple implementation of the unsymmetrized split-step Fourier
    algorithm. Local error: second order in the step size.
    up : input field
    '''
    nt = len(up)
    w = wspace(dt * nt, nt)

    # build the linear operator
    linearstep = -alpha / 2.0
    for ii in arange(len(betap)):
        linearstep = linearstep - 1.0j * betap[ii] * pow(w, ii) / factorial(ii)
    linearstep = exp(linearstep * dz)

    ufft = fftpack.fft(up)
    for iz in arange(nz):
        # first apply the linear operator
        uhalf = fftpack.ifft(linearstep * ufft)
        # then apply the nonlinear operator
        uv = uhalf * exp(-1.0j * gamma * (pow(abs(up), 2.0)) * dz)
        ufft = fftpack.fft(uv)
        up = uv
    return uv
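# ssfu above and pulseSpectrum below use a wspace helper that is not shown.
# A minimal sketch under stated assumptions: it should return the angular
# frequency grid matching the fftpack bin ordering (pulseSpectrum divides it
# by 2*pi and fftshifts it to get ordinary frequencies).
import numpy as np
from scipy import fftpack

def wspace(T, nt):
    """Angular-frequency grid for an FFT of nt samples spanning time T."""
    dt = T / nt
    return 2 * np.pi * fftpack.fftfreq(nt, d=dt)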
def get_phase_diff(target_freq, fs, a, b):
    # sample rate (Hz)
    sample_rate = fs
    # highest frequency captured - limited by sample rate (Hz)
    high_freq_bound = sample_rate / 2
    # number of samples (bins)
    sample_number = len(a)
    # signal freq held in each index of the arrays (Hz/bin)
    scale = sample_rate / sample_number
    # index of the arrays for the complex number we want (bin);
    # must be rounded to an integer to be usable as an index
    index = int(round(target_freq / scale))
    # update target frequency to one that matches the scale of the fft
    target_freq = scale * index
    # getting our phases (radians)
    a_phase = phase(fft(a)[index])
    b_phase = phase(fft(b)[index])
    b_phase = relative_wraparound(a_phase, b_phase)
    print("a_phase = %f pi radians" % (a_phase / math.pi))
    print("b_phase = %f pi radians" % (b_phase / math.pi))
    # compute phase difference
    phase_diff = b_phase - a_phase
    print("b leads a by %fpi radians" % (phase_diff / math.pi))
    return phase_diff
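# get_phase_diff uses phase() and relative_wraparound() helpers that are not
# shown. Minimal sketches under stated assumptions: phase() is the complex
# argument (cmath.phase would do), and relative_wraparound() shifts a phase
# by whole turns until it lies within pi of the reference.
import math

def phase(z):
    """Argument of a complex number, in radians."""
    return math.atan2(z.imag, z.real)

def relative_wraparound(ref, ph):
    """Wrap ph by multiples of 2*pi so that |ph - ref| <= pi."""
    while ph - ref > math.pi:
        ph -= 2 * math.pi
    while ph - ref < -math.pi:
        ph += 2 * math.pi
    return ph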
def freq_filter(f, dt, cutoff_freq, convention='math'):
    """
    A digital filter that removes frequency components above a cutoff frequency.

    Parameters
    ----------
    f : time signal
    dt : sampling period
    cutoff_freq : cutoff frequency

    Returns
    -------
    The filtered time signal
    """
    if convention == 'math':
        f_freq = fft(f)
    elif convention == 'physics':
        f_freq = ifft(f)
    Ns = np.size(f)
    freqs = fftfreq(Ns, dt)

    # filtering operation
    f_freq[np.where(np.abs(freqs) > cutoff_freq)] = 0

    # go back to the time domain
    if convention == 'math':
        f_filter_time = ifft(f_freq)
    elif convention == 'physics':
        f_filter_time = fft(f_freq)
    return f_filter_time
def optimalZeropad(x, fs, f):
    """
    Inputs:
        x (numpy array) = input signal of length W
        fs (float) = sampling frequency in Hz
        f (float) = frequency of the sinusoid in Hz
    Output:
        The function should return
        mX (numpy array) = the positive half of the DFT spectrum of the
        M point DFT after zero-padding x appropriately (zero-padding
        length to be computed). mX is (M/2)+1 samples long.
    """
    # Zero-pad until the sinusoid sits exactly on a DFT bin, i.e. until only
    # a single spectral sample rises above the -120 dB floor. The original
    # loop transformed X instead of x and never updated mX or returned.
    X = fft(x)
    mX = 20 * np.log10(np.abs(X))
    cnt = np.size(mX[mX > -120])
    while cnt > 1:
        x = np.append(x, 0)
        X = fft(x)
        mX = 20 * np.log10(np.abs(X))
        cnt = np.size(mX[mX > -120])
    M = len(x)
    return mX[:M // 2 + 1]
def pulseSpectrum(t, SVEAAmp, lambdaZero=0.0, units='nm'):
    '''
    Compute the spectrum of an SVEA pulse centred at lambdaZero.
    * t: time vector
    * SVEAAmp: SVEA envelope of the pulse
    * lambdaZero: centre of the pulse [m]
    * units: units of the output ['nm', 'um', 'm']
    '''
    C = 2.99792458e-4
    nt = len(t)
    dt = t[1] - t[0]
    T = t.max() - t.min()
    w = wspace(T, nt)
    vs = fftpack.fftshift(w / (2 * pi))

    # assign the unit scale
    unitScale = {
        'nm': lambda: 1.0e9,
        'um': lambda: 1.0e6,
        'm': lambda: 1.0
    }[units]()

    if lambdaZero != 0.0:
        wavelength = (1.0 / ((vs / C) + 1.0 / lambdaZero)) * unitScale
        return [wavelength,
                fftpack.fftshift(pow(abs(dt * fftpack.fft(SVEAAmp)
                                         / sqrt(2.0 * pi)), 2))]
    else:
        return [vs,
                fftpack.fftshift(pow(abs(dt * fftpack.fft(SVEAAmp)
                                         / sqrt(2.0 * pi)), 2))]
def dct(self, x):
    '''Compute the discrete cosine transform of the 1-d array x.'''
    # probably don't need this here anymore since it is in fftpack now
    N = len(x)

    # calculate the DCT weights
    w = (np.exp(-1j * np.arange(N) * np.pi / (2 * N)) / np.sqrt(2 * N))
    w[0] = w[0] / np.sqrt(2)

    # test for odd or even function
    if (N % 2 == 1) or (any(np.isreal(x)) == False):
        y = np.zeros(2 * N)
        y[0:N] = x
        y[N:2 * N] = x[::-1]
        yy = fftpack.fft(y)
        yy = yy[0:N]
    else:
        y = np.r_[(x[0:N:2], x[N:0:-2])]
        yy = fftpack.fft(y)
        w = 2 * w

    # apply the weights
    X = w * yy

    if all(np.isreal(x)):
        X = np.real(X)
    return X
def testAngularMethod(self):
    sample_rate = 16000
    sample_delay = 5
    angle = math.pi / 6
    if abs(math.cos(angle)) > 1e-10:
        dist = sample_delay * pa_tools.SPEED_OF_SOUND / (sample_rate * math.cos(angle))
    else:
        dist = 1
        sample_delay = 0
    print("distance: " + str(dist))
    mics = np.array([[0., 0.], [dist, 0.]], dtype=np.float32)
    data_len = 100
    data1 = np.random.rand(1, data_len)
    if sample_delay > 0:
        data2 = np.concatenate((np.random.rand(1, sample_delay),
                                [data1[0, :-sample_delay]]), axis=1)
    else:
        data2 = data1
    # get dfts
    fft1 = fftp.fft(data1[0])
    fft2 = fftp.fft(data2[0])
    ffts = np.array([fft1, fft2])
    loc = DirectionLocalizer(mics, sample_rate=sample_rate)
    direction = loc.get_direction_np(ffts)
    print("direction: " + str(direction))
    # plot
    plt.figure()
    plt.plot(mics[:, 0], mics[:, 1], 'bo')
    plt.quiver(0, 0, direction[0], direction[1], scale=20)
    plt.show()
def calc_QW(n, k, kk, kw, Q, W, useFFT):
    """
    Convolution coefficient

    Args:
        n: Order of the coefficients
        k: Index of the bus
        kk, kw: Column indices into Q and W
        Q, W: Coefficient matrices (Ncoeff x nbus elements)
        useFFT: compute the convolution via FFT instead of a direct loop

    Output:
        Convolution coefficient of order n for the bus k
    """
    if useFFT:
        a = fftpack.fft(Q[:, kk])
        b = fftpack.fft(conj(W[:, kw]))
        e = fftpack.ifft(a * b)
        result = e[n]
    else:
        result = complex_type(0)
        for l in range(n):
            result += Q[l, kk] * conj(W[n - l, kw])
    return result
def highpass_filter(data, width):
    """Highpass filter on *width* scales using a Blackman window.

    Finite impulse response filter *that discards invalid data* at the ends.
    """
    ntime = data.shape[-1]

    # Blackman FWHM factor.
    window_width = int(width / 0.4054785)

    if window_width % 2:
        window_width += 1

    window = np.zeros(ntime, dtype=np.float32)
    window_core = signal.blackman(window_width, sym=True)
    window_core = -window_core / np.sum(window_core)
    window_core[window_width // 2] += 1
    window[:window_width] = window_core
    window_fft = fftpack.fft(window)

    ntime_out = data.shape[-1] - window_width + 1
    out_shape = data.shape[:-1] + (ntime_out,)
    out = np.empty(out_shape, data.dtype)

    for ii in range(data.shape[0]):
        d_fft = fftpack.fft(data[ii])
        d_fft *= window_fft
        d_lpf = fftpack.ifft(d_fft)
        out[ii] = d_lpf[-ntime_out:].real
    return out
def __init__(self, wav, frame_size=2048, fps=200, filterbank=None,
             log=False, mul=1, add=1, online=True, block_size=526,
             lgd=True):
    """
    Creates a new Spectrogram object instance and performs a STFT on the
    given audio.

    :param wav: a Wav object
    :param frame_size: the size for the window [samples]
    :param fps: frames per second
    :param filterbank: use the given filterbank for dimensionality reduction
    :param log: use logarithmic magnitude
    :param mul: multiply the magnitude by this factor before taking the
                logarithm
    :param add: add this value to the magnitude before taking the logarithm
    :param online: work in online mode (i.e. use only past information)
    :param block_size: perform the filtering in blocks of the given size
    :param lgd: compute the local group delay (needed for the ComplexFlux
                algorithm)
    """
    # init some variables
    self.wav = wav
    self.fps = fps
    self.filterbank = filterbank
    if add <= 0:
        raise ValueError("a positive value must be added before taking "
                         "the logarithm")
    if mul <= 0:
        raise ValueError("a positive value must be multiplied before "
                         "taking the logarithm")
    # derive some variables
    # use floats so that seeking works properly
    self.hop_size = float(self.wav.sample_rate) / float(self.fps)
    self.num_frames = int(np.ceil(self.wav.num_samples / self.hop_size))
    self.num_fft_bins = int(frame_size / 2)
    # initial number of bins equal to fft bins, but those can change if
    # filters are used
    self.num_bins = int(frame_size / 2)
    # init spec matrix
    if filterbank is None:
        # init with number of FFT frequency bins
        self.spec = np.empty([self.num_frames, self.num_fft_bins],
                             dtype=np.float32)
    else:
        # init with number of filter bands
        self.spec = np.empty([self.num_frames, np.shape(filterbank)[1]],
                             dtype=np.float32)
        # set number of bins
        self.num_bins = np.shape(filterbank)[1]
    # set the block size
    if not block_size or block_size > self.num_frames:
        block_size = self.num_frames
    # init block counter
    block = 0
    # init a matrix of that size
    spec = np.zeros([block_size, self.num_fft_bins])
    # init the local group delay matrix
    self.lgd = None
    if lgd:
        self.lgd = np.zeros([self.num_frames, self.num_fft_bins],
                            dtype=np.float32)
    # create windowing function for DFT
    self.window = np.hanning(frame_size)
    try:
        # the audio signal is not scaled, scale the window accordingly
        max_value = np.iinfo(self.wav.audio.dtype).max
        self._fft_window = self.window / max_value
    except ValueError:
        self._fft_window = self.window
    # step through all frames
    for frame in range(self.num_frames):
        # seek to the right position in the audio signal
        if online:
            # step back one frame_size after moving forward 1 hop_size
            # so that the current position is at the end of the window
            seek = int((frame + 1) * self.hop_size - frame_size)
        else:
            # step back half of the frame_size so that the frame
            # represents the centre of the window
            seek = int(frame * self.hop_size - frame_size / 2)
        # read in the right portion of the audio
        if seek >= self.wav.num_samples:
            # end of file reached
            break
        elif seek + frame_size >= self.wav.num_samples:
            # end behind the actual audio, append zeros accordingly
            zeros = np.zeros(seek + frame_size - self.wav.num_samples)
            signal = self.wav.audio[seek:]
            signal = np.append(signal, zeros)
        elif seek < 0:
            # start before the actual audio, pad with zeros accordingly
            zeros = np.zeros(-seek)
            signal = self.wav.audio[0:seek + frame_size]
            signal = np.append(zeros, signal)
        else:
            # normal read operation
            signal = self.wav.audio[seek:seek + frame_size]
        # multiply the signal with the window function
        signal = signal * self._fft_window
        # perform DFT
        stft = fft.fft(signal)[:self.num_fft_bins]
        # compute the local group delay
        if lgd:
            # unwrap the phase
            unwrapped_phase = np.unwrap(np.angle(stft))
            # local group delay is the derivative over frequency
            self.lgd[frame, :-1] = (unwrapped_phase[:-1]
                                    - unwrapped_phase[1:])
        # is block-wise processing needed?
        if filterbank is None:
            # no filtering needed, thus no block-wise processing needed
            self.spec[frame] = np.abs(stft)
        else:
            # filter in blocks
            spec[frame % block_size] = np.abs(stft)
            # end of a block or end of the signal reached
            end_of_block = (frame + 1) / block_size > block
            end_of_signal = (frame + 1) == self.num_frames
            if end_of_block or end_of_signal:
                start = block * block_size
                stop = min(start + block_size, self.num_frames)
                filtered_spec = np.dot(spec[:stop - start], filterbank)
                self.spec[start:stop] = filtered_spec
                # increase the block counter
                block += 1
        # next frame
    # take the logarithm
    if log:
        np.log10(mul * self.spec + add, out=self.spec)
# intermediate buffers
zs = np.zeros(n_len)
Zs = np.zeros(n_fft)

# output buffer
ys = np.zeros(n_len)

# window function
window = np.hanning(n_fft)

# FFT & IFFT
for start in range(0, n_len - n_shift, n_shift):
    xs_cut = xs[start: start + n_fft]
    xs_win = xs_cut * window
    Xs = fft.fft(xs_win, n_fft)
    # some signal processing
    Zs = Xs
    zs = fft.ifft(Zs, n_fft)
    # write to the output buffer
    ys[start: start + n_fft] += np.real(zs)

# plot the first 10 seconds
fig = plt.figure(1, figsize=(8, 10))
ax = fig.add_subplot(211)
ax.plot(xs[:fs * 10])
ax.set_title("input signal")
ax.set_xlabel("time [pt]")
ax.set_ylabel("amplitude")
def Traitement_Difference(nom_exp1, nom_exp2, nom_resultat, valeur_resistance,
                          t1, t2, duree, amplification1=1., amplification2=1.,
                          diviseur=1., offset=0., freq_coupure=0.):
    import numpy as np
    from Graphique import Graphique_Simple
    from Graphique import Graphique_Double_Echelle
    from Integration import Integrale_Trapeze
    from Filtre import Passe_bas
    from scipy import fftpack

    data1 = np.loadtxt(nom_exp1 + '.dat', delimiter=';')
    data2 = np.loadtxt(nom_exp2 + '.dat', delimiter=';')

    Temps1 = np.asarray(data1[:, 0], dtype=float) / 1000000.
    Temps1 = Temps1 - Temps1[0]
    Tension1 = (np.asarray(data1[:, 1], dtype=float) - offset) * diviseur / amplification1
    Temps2 = np.asarray(data2[:, 0], dtype=float) / 1000000.
    Temps2 = Temps2 - Temps2[0]
    # note: this probably should divide by amplification2 for the second channel
    Tension2 = (np.asarray(data2[:, 1], dtype=float) - offset) * diviseur / amplification1

    # find the sample indices closest to t1, t1 + duree and t2
    i1 = 0
    for i in range(0, Temps1.size):
        if abs(Temps1[i1] - t1) > abs(Temps1[i] - t1):
            i1 = i
    di = 0
    for i in range(i1, Temps1.size):
        if abs(Temps1[di + i1] - (t1 + duree)) > abs(Temps1[i] - (t1 + duree)):
            di = i - i1
    i2 = 0
    for i in range(0, Temps2.size):
        if abs(Temps2[i2] - t2) > abs(Temps2[i] - t2):
            i2 = i

    Temps = np.zeros(di)
    T1 = np.zeros(di)
    T2 = np.zeros(di)
    for i in range(i1, i1 + di):
        Temps[i - i1] = Temps1[i] - Temps1[i1]
        T1[i - i1] = Tension1[i]
    for i in range(i2, i2 + di):
        T2[i - i2] = Tension2[i]
    Tension = T1 - T2

    freq_echantillonage = Temps[Temps.size - 1] / Temps.size  # sampling period
    freqs = fftpack.fftfreq(Temps.size, d=freq_echantillonage)
    TFT_M = abs(fftpack.fft(Tension - np.mean(Tension)))
    TFT = abs(fftpack.fft(Tension))

    Graphique_Simple(Temps, Tension, x_label='Temps [s]', y_label='Tension [V]',
                     save_name='Graphique_' + nom_resultat + '_T',
                     graph_name='Difference de la tension au cours du temps')
    Graphique_Simple(freqs, TFT, x_label='Temps [s]', y_label='Tension [V]',
                     save_name='Graphique_' + nom_resultat + '_TFT',
                     graph_name='TF Difference de la tension au cours du temps')
    Graphique_Simple(freqs, TFT_M, x_label='Temps [s]', y_label='Tension [V]',
                     save_name='Graphique_' + nom_resultat + '_TTF-M',
                     graph_name='TF sans la composante continu Difference de '
                                'la tension au cours du temps')
from scipy.fftpack import fft

M = 64
N = 512
hN = N // 2
hM = M // 2
fftbuffer = np.zeros(N)
mX1 = np.zeros(N)

plt.figure(1, figsize=(4, 6))
fftbuffer[hN - hM:hN + hM] = np.hamming(M)
plt.subplot(2, 1, 1)
plt.plot(np.arange(-hN, hN), fftbuffer, 'b', lw=1.5)
plt.axis([-hM, hM - 1, 0, 1])
plt.title('w (hamming window), M=64')

X = fft(fftbuffer)
mX = 20 * np.log10(abs(X))
mX1[:hN] = mX[hN:]
mX1[N - hN:] = mX[:hN]

plt.subplot(2, 1, 2)
plt.plot(M * np.arange(-hN, hN) / float(N), mX1 - max(mX), 'r', lw=1.5)
plt.axis([-hM, hM - 1, -55, 0])
plt.title('mW')

plt.tight_layout()
plt.savefig('hamming.png')
plt.show()
def testbench():
    fs = 1000                       # sampling frequency
    N = np.linspace(30, 5000, 498)
    # Ns = np.array([10000], dtype=float)
    K = 10                          # number of blocks
    overlap = 50                    # overlap in %

    # white Gaussian noise parameters
    mean = 0
    variance = 2

    result_sesgo = []
    result_var = []

    #%% Sweep N
    for n in N:
        L = int(n / K)
        D = int(L * overlap / 100)               # block offset
        cant_promedios = 1 + int((n - L) / D)    # number of averages taken

        window = barlett(L)
        window = window / LA.norm(window)
        real_bin_psd = variance / L

        noise = np.random.normal(mean, np.sqrt(variance), int(n))
        n1 = 0
        psd_average = 0
        for i in range(cant_promedios):
            noise_spectrum = fft(noise[n1:(n1 + L)] * window, axis=0)
            psd = pow((1 / L) * np.abs(noise_spectrum), 2)
            psd_average = psd_average + psd / cant_promedios
            n1 = n1 + D
        psd_average = psd_average * L  # TODO: no idea where this *L comes from

        # variance of the estimator
        bin_var = np.var(psd_average) * L**2
        # expected value of the PSD
        bin_PSD_esperado = np.average(psd_average)
        # variance of the signal: area under the PSD
        varianza = np.sum(psd_average)
        sesgo = np.abs(real_bin_psd - bin_PSD_esperado)

        result_sesgo.append(sesgo)
        result_var.append(bin_var)

    plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(N, result_sesgo)
    plt.title("Sesgo en funcion de N")
    plt.xlabel("N")
    plt.ylabel("Sesgo")
    plt.yscale("log")
    plt.grid()
    plt.subplot(2, 1, 2)
    plt.plot(N, result_var)
    plt.title("Varianza en funcion de N")
    plt.xlabel("N")
    plt.ylabel("Varianza")
    # plt.yscale("log")
    plt.grid()

    # print("Varianza del bin promedio: " + str(bin_var))
    # print("Valor esperado del bin: " + str(bin_PSD_esperado))
    # print("Area: " + str(varianza))
    # print("Sesgo: " + str(sesgo))

    #%% Sweep K
    K = np.array([1, 2, 5, 10, 20, 25, 40, 50, 100], dtype=float)
    K = np.linspace(1, 200, 200)
    N = 1000
    result_sesgo = []
    result_var = []
    df = []
    for k in K:
        L = int(N / k)
        D = int(L * overlap / 100)               # block offset
        cant_promedios = 1 + int((N - L) / D)    # number of averages taken
        df.append(fs / L)

        window = barlett(L)
        window = window / LA.norm(window)
        real_bin_psd = variance / L

        noise = np.random.normal(mean, np.sqrt(variance), int(N))
        n1 = 0
        psd_average = 0
        for i in range(cant_promedios):
            noise_spectrum = fft(noise[n1:(n1 + L)] * window, axis=0)
            psd = pow((1 / L) * np.abs(noise_spectrum), 2)
            psd_average = psd_average + psd / cant_promedios
            n1 = n1 + D
        psd_average = psd_average * L  # TODO: no idea where this *L comes from

        # variance of the estimator
        bin_var = np.var(psd_average) * L**2
        # expected value of the PSD
        bin_PSD_esperado = np.average(psd_average)
        # variance of the signal: area under the PSD
        varianza = np.sum(psd_average)
        sesgo = np.abs(real_bin_psd - bin_PSD_esperado)

        result_sesgo.append(sesgo)
        result_var.append(bin_var)

    plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(K, df)
    plt.title("Resolucion espectral en funcion de K")
    plt.xlabel("K")
    plt.ylabel("$\Delta f$")
    # plt.yscale("log")
    plt.grid()
    plt.subplot(2, 1, 2)
    plt.plot(K, result_var)
    plt.title("Varianza en funcion de K")
    plt.xlabel("K")
    plt.ylabel("Varianza")
    plt.yscale("log")
    plt.grid()

    #%% Sweep overlap
    N = 1000
    K = 10
    overlap = np.linspace(10, 50, 5)
    # overlap = np.array([10, 50], dtype=float)
    L = int(N / K)
    result_sesgo = []
    result_var = []
    df = []
    noise = np.random.normal(mean, np.sqrt(variance), int(N))
    for OL in overlap:
        D = int(L * OL / 100)                    # block offset
        cant_promedios = 1 + int((N - L) / D)    # number of averages taken
        df.append(fs / L)

        window = barlett(L)
        window = window / LA.norm(window)
        real_bin_psd = variance / L

        n1 = 0
        psd_average = 0
        for i in range(cant_promedios):
            noise_spectrum = fft(noise[n1:(n1 + L)] * window, axis=0)
            psd = pow((1 / L) * np.abs(noise_spectrum), 2)
            psd_average = psd_average + psd / cant_promedios
            n1 = n1 + D
        psd_average = psd_average * L  # TODO: no idea where this *L comes from

        # variance of the estimator
        bin_var = np.var(psd_average) * L**2
        # expected value of the PSD
        bin_PSD_esperado = np.average(psd_average)
        # variance of the signal: area under the PSD
        varianza = np.sum(psd_average)
        sesgo = np.abs(real_bin_psd - bin_PSD_esperado)

        result_sesgo.append(sesgo)
        result_var.append(bin_var)

    plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(overlap, df)
    plt.title("Resolucion espectral en funcion del overlap")
    plt.xlabel("Overlap [%]")
    plt.ylabel("$\Delta f$")
    # plt.yscale("log")
    plt.grid()
    plt.subplot(2, 1, 2)
    plt.plot(overlap, result_var)
    plt.title("Varianza en funcion del overlap")
    plt.xlabel("Overlap [%]")
    plt.ylabel("Varianza")
    # plt.yscale("log")
    plt.grid()
SCALE = 1000
EXPONENT = 7

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt32,
                channels=1,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK,
                input_device_index=INDEX)

vol = 0
max_vol = 0  # renamed to avoid shadowing the builtin max()
maxCount = 0
while True:
    # np.fromstring is deprecated; frombuffer reads the raw bytes directly
    data = np.frombuffer(stream.read(CHUNK), dtype=np.int32)
    freq = fft(data)  # frequencies via Fast Fourier transform
    # sequential average; width 4 matches the 32-bit samples
    vol = int((vol + audioop.rms(data, 4)) / 2)
    # calculate volume from 0 to 1, 1 being max
    if vol > max_vol:
        max_vol = vol
        maxCount = 0
    elif maxCount > 10:
        max_vol *= .99
    else:
        maxCount += 1
    print((vol + 1) / (max_vol + 1))
    time.sleep(.01)
def plot_shh_anal_loc():
    """
    Function to plot multiple analytical power spectra along e.g. an aquifer
    in a 3D plot.

    Still not working because plot3d has issues with log scale ...
    """
    # set parameters
    data_points = 8000
    time_step_size = 86400
    aquifer_length = 1000
    Sy = 1e-1
    T = 0.001

    from calc_tc import calc_tc
    tc = calc_tc(aquifer_length, Sy, T)
    print(tc)

    import sys
    # add search path for own modules
    sys.path.append("/Users/houben/PhD/python/scripts/spectral_analysis")
    from shh_analytical import shh_analytical
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from mpl_toolkits.mplot3d import Axes3D
    import scipy.fftpack as fftpack

    # create an input signal
    np.random.seed(123456789)
    input = np.random.rand(data_points)
    spectrum = fftpack.fft(input)
    # keep only the first half of the spectrum (positive frequencies)
    spectrum = abs(spectrum[:round(len(spectrum) / 2)]) ** 2
    spectrum = spectrum  # [1:]
    # X contains the different locations
    X = np.linspace(0, aquifer_length - 1, 10)
    X = [100, 900]
    # Y contains the frequencies; erase the first data point because it's 0
    Y = abs(fftpack.fftfreq(len(input), time_step_size))[:round(len(input) / 2)]
    Y = np.log10(Y[1:])
    Z = np.zeros((len(Y), len(X)))
    for i, loc in enumerate(X):
        Z[:, i] = np.log10(
            shh_analytical((Y, spectrum), Sy=Sy, T=T, x=loc,
                           L=aquifer_length, m=5, n=5, norm=False))
    # erase first data point from Z for each location
    print(Z)
    print(Y)
    import matplotlib.pyplot as plt
    plt.plot(Y, Z[:, 0])
    # plt.loglog(Y, Z[:, 0])
    # X, Y = np.meshgrid(X, Y)
    # fig = plt.figure()
    # ax = Axes3D(fig)
    # surf = ax.plot_surface(
    #     X, Y, Z, rstride=1, cstride=2, shade=False, linewidth=1,
    #     cmap="Spectral_r"
    # )
    # surf = ax.plot_wireframe(X, Y, Z, rstride=0, cstride=1, cmap=cm.magma)
    # surf.set_edgecolors(surf.to_rgba(surf._A))
    # surf.set_facecolors("white")
    # ax1 = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=0)
    # ax.set_xlabel("Location [m]")
    # ax.set_ylabel("Frequency [Hz]")
    # ax.set_zlabel("log Spectral Density")
    # ax.set_zscale("log")
    # ax.yaxis.set_scale("log")
    # ax.zaxis._set_scale('log')
    # ax.set_yscale("log")
    plt.show()
def trialfunction(input_data):
    trials_dic = {}
    dbc = 0
    if Alc_train_extractor.shape == Con_train_extractor.shape:
        print('Same shape error:')
        print(X_train.shape)
        print(y_train.shape)
        raise SystemExit
    if (input_data.shape == Alc_train_extractor.shape) or (
            input_data.shape == Alc_train_classifier.shape) or (
            input_data.shape == Alc_test.shape):
        dbc = EEG_data
    if (input_data.shape == Con_train_extractor.shape) or (
            input_data.shape == Con_train_classifier.shape) or (
            input_data.shape == Con_test.shape):
        dbc = EEG_data_control

    for pos in input_data:
        Trial = dbc.loc[dbc['trial number'] == pos]
        columns = ['channel', 'time', 'sensor value']
        Trial = Trial.pivot_table(index='channel', columns='time',
                                  values='sensor value')
        trials_dic[pos] = Trial

    RGB_dic = {}
    for key in trials_dic:
        data = trials_dic.get(key)

        # get real amplitudes of FFT (only in positive frequencies)
        fft_raw = fft(data)
        fft_vals = np.absolute(fft_raw)
        fft_vals = normalize(fft_vals, axis=1)

        # get frequencies for amplitudes in Hz
        fs = 256  # sampling rate
        fft_freq = fftfreq(fs, 1.0 / fs)

        # define EEG bands
        eeg_bands = {
            'Theta': (4, 7),
            'Alpha': (8, 12),
            'Beta': (13, 30),
        }

        # take the sum of squared absolute values/amplitudes for each EEG band
        eeg_band_fft = defaultdict(list)
        for band in eeg_bands:
            freq_ix = np.where((fft_freq >= eeg_bands[band][0]) &
                               (fft_freq <= eeg_bands[band][1]))[0]
            for channel in fft_vals:
                filterdch = channel[freq_ix]
                sqdvals = np.square(filterdch)
                sumvals = np.sum(sqdvals, axis=0)
                eeg_band_fft[band].append(sumvals)

        extracted_df = pd.DataFrame(eeg_band_fft)
        neeg = EEG_data.drop(columns=[
            'matching condition', 'name', 'trial number',
            'subject identifier', 'time', 'sample num', 'sensor value'
        ])
        neeg = neeg.drop_duplicates()

        # get names of source electrodes:
        extracted_df = extracted_df.reset_index(drop=True)
        neeg = neeg.reset_index(drop=True)
        e_names = neeg
        e_names = e_names.rename(columns={'sensor position': 0})
        extracted_df = extracted_df.join(neeg)

        # get coordinates in 3d from robertoostenveld.nl/electrodes/plotting_1005.txt
        coords = pd.read_csv(
            '/kaggle/input/httpsrobertoostenveldnlelectrodes/plotting_1005.txt',
            sep='\t', header=None)
        coords = coords.drop(coords.columns[4], axis=1)
        # print(coords)
        testerd = pd.merge(e_names, coords, on=0, how='inner')
        testerd.set_index('channel', inplace=True)
        testerd.columns = ['pos', 'x', 'y', 'z']
        extracted_df = extracted_df.rename(columns={'sensor position': "pos"})

        # filter values and coordinates
        extracted_df = pd.merge(extracted_df, testerd, on="pos", how='inner')
        extracted_df = extracted_df.drop(['x', 'y', 'z'], axis=1)
        extracted_df.set_index('channel', inplace=True)
        extracted_df = extracted_df.drop(columns=['pos'])
        extracted_df.index.names = ['pos']

        # adapted from https://www.samuelbosch.com/2014/02/azimuthal-equidistant-projection.html
        class Point(object):
            def __init__(self, x, y, z):
                self.x = x
                self.y = y
                self.z = z

        class AzimuthalEquidistantProjection(object):
            """
            http://mathworld.wolfram.com/AzimuthalEquidistantProjection.html
            http://mathworld.wolfram.com/SphericalCoordinates.html
            """
            def __init__(self):
                # polar latitude centre of projection,
                # https://en.wikipedia.org/wiki/Azimuthal_equidistant_projection
                self.t1 = pi / 2
                # arbitrary longitude centre of projection
                self.l0 = 0
                self.cost1 = cos(self.t1)
                self.sint1 = sin(self.t1)

            def project(self, point):
                # adapted for 3D cartesian to spherical
                hxy = np.hypot(point.x, point.y)
                t = np.arctan2(point.z, hxy)
                l = np.arctan2(point.y, point.x)
                costcosll0 = cos(t) * cos(l - self.l0)
                sint = sin(t)
                c = acos((self.sint1) * (sint) + (self.cost1) * costcosll0)
                k = c / sin(c)
                x = k * cos(t) * sin(l - self.l0)
                y = k * (self.cost1 * sint - self.sint1 * costcosll0)
                return x, y

        # projection dataframe
        projected_df = pd.DataFrame()
        for index, row in testerd.iterrows():
            x = row['x']
            y = row['y']
            z = row['z']
            p = AzimuthalEquidistantProjection()
            r = p.project(Point(x, y, z))
            r = pd.Series(r)
            projected_df = projected_df.append(r, ignore_index=True)
        projected_df = projected_df.rename(columns={0: 'X', 1: 'Y'})

        # map coordinates to values
        new_df = projected_df.join(extracted_df)
        new_df = new_df.drop([31])  # drop row because it contains no values
        # print(new_df)
        Theta_df = new_df.drop(['Alpha', 'Beta', 'X', 'Y'], axis=1)
        Alpha_df = new_df.drop(['Theta', 'Beta', 'X', 'Y'], axis=1)
        Beta_df = new_df.drop(['Theta', 'Alpha', 'X', 'Y'], axis=1)

        # map onto a mesh
        xpoints = np.array(new_df[['X']].squeeze())
        ypoints = np.array(new_df[['Y']].squeeze())
        Thetavalues = np.array(Theta_df).squeeze()
        Alphavalues = np.array(Alpha_df).squeeze()
        Betavalues = np.array(Beta_df).squeeze()
        xx, yy = np.mgrid[-1.5:1.5:32j, -1.5:1.5:32j]
        Thetavalues = minmax_scale(Thetavalues, feature_range=(0.0, 1.0), axis=0)
        Alphavalues = minmax_scale(Alphavalues, feature_range=(0.0, 1.0), axis=0)
        Betavalues = minmax_scale(Betavalues, feature_range=(0.0, 1.0), axis=0)

        Thetagrid = griddata((xpoints, ypoints), Thetavalues, (xx, yy),
                             method='cubic', fill_value=0.0)
        Alphagrid = griddata((xpoints, ypoints), Alphavalues, (xx, yy),
                             method='cubic', fill_value=0.0)
        Betagrid = griddata((xpoints, ypoints), Betavalues, (xx, yy),
                            method='cubic', fill_value=0.0)

        # RGB construction
        RGB = np.empty((32, 32, 3))
        RGB[:, :, 0] = Thetagrid
        RGB[:, :, 1] = Alphagrid
        RGB[:, :, 2] = Betagrid
        RGB_dic[key] = RGB

    # create a new dict with new keys
    lendict = len(RGB_dic)
    # print('lendict: ', lendict)
    lenlist = np.arange(0, lendict)
    # print(lenlist)
    final_dict = dict(zip(lenlist, list(RGB_dic.values())))
    return final_dict
def get_fft_values(y_values, T, N, f_s):
    f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
    fft_values_ = fft(y_values)
    # single-sided amplitude spectrum
    fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
    return f_values, fft_values
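# A minimal usage sketch for get_fft_values, assuming numpy and
# scipy.fftpack.fft are imported as above: a 10 Hz sine sampled at 100 Hz
# should produce a single spectral peak near 10 Hz with amplitude ~1.
import numpy as np
from scipy.fftpack import fft

f_s = 100.0          # sampling frequency [Hz]
T = 1.0 / f_s        # sampling period [s]
N = 1000             # number of samples
t = np.arange(N) * T
y = np.sin(2 * np.pi * 10.0 * t)

f_values, fft_values = get_fft_values(y, T, N, f_s)
print(f_values[np.argmax(fft_values)])   # ~10.0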
# In[2]:

from scipy.fftpack import fft, ifft

n = 3
x = []
for i in range(2**n):
    x.append(math.ceil(random.random() * 10))
print("x", x)
Myfft = myfft(n, x)
print("after Myfft", Myfft)
Myifft1 = Difft(Myfft, n)
_fft = fft(x)
print("fft", _fft)
print("after MyIfft", Myifft1)
print("Myfft - fft", np.linalg.norm(Myfft - _fft))
print("x - Myifft", np.linalg.norm(x - Myifft1))
# print(np.linalg.norm(x - myifft(n, Myfft)))


# In[12]:

import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft

n = 16
Ts = 1 / Fs
fsig = 10
plt.close('all')

# Generate a time vector; this allows a lot of flexibility.
# Note that the first sample is at time 0, hence the N - 1.
# The third parameter is the number of samples.
t = np.linspace(0.0, (N - 1) / Fs, N)

# sinusoidal signal
# s = np.zeros(N)
# s = np.sin(2*np.pi*fsig*t)
s = 0.5 + signal.square(2 * np.pi * fsig * t) / 2

# 2/N gives the normalized spectrum, i.e. the maximum carries a '1'
spectrum = (2 / N) * np.abs(sc.fft(s))

# integer division keeps the index from ending up as a float
half = spectrum[0:N // 2]
frec = np.linspace(0, Fs / 2, N // 2)

plt.stem(frec, half)
plt.show()
plt.figure(2)
plt.plot(t, s)
plt.show()
x_pol = np.loadtxt(file_name, delimiter=' ', usecols=0)
print("First column loaded. Loading second column....")
y_pol = np.loadtxt(file_name, delimiter=' ', usecols=1)
print("Data successfully loaded. Preparing for FFT....")

pulm_fft_x = np.array_split(x_pol, len(x_pol) / 512)
pulm_fft_y = np.array_split(y_pol, len(y_pol) / 512)
print(len(pulm_fft_y), len(pulm_fft_x))

# FFT
print("Performing FFT....")
ffted = []
# ffted_single = []
# residue = []
N = 512
for x, y in zip(pulm_fft_x, pulm_fft_y):
    yr_x = fft(x)  # "raw" FFT with both + and - frequencies
    yr_y = fft(y)  # "raw" FFT with both + and - frequencies
    # np.int is deprecated; the builtin int is equivalent here
    ffted.append(np.real(yr_x[0:int(N / 2)] * (yr_y[0:int(N / 2)].conj())))
    # residue.append(np.imag(yr_x[0:int(N/2)]*(yr_y[0:int(N/2)].conj())))
    # ffted_single.append(np.abs(yr_y[0:int(N/2)]))


# stacking function
def stacking(packets):
    temp = np.zeros(len(packets[0]))
    for packet in packets:
        temp = temp + packet
    return temp
import numpy as np
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from matplotlib.pylab import mpl

mpl.rcParams['font.sans-serif'] = ['SimHei']   # display Chinese characters
mpl.rcParams['axes.unicode_minus'] = False     # display the minus sign

# Choose 1400 sampling points: the highest frequency component of the signal
# is 600 Hz, and by the sampling theorem the sampling frequency must exceed
# twice the signal frequency, so set it to 1400 Hz (i.e. 1400 samples per
# second).
x = np.linspace(0, 1, 1400)

# build the signal to sample, with frequency components at 200, 400 and 600 Hz
y = 7 * np.sin(2 * np.pi * 200 * x) + 5 * np.sin(
    2 * np.pi * 400 * x) + 3 * np.sin(2 * np.pi * 600 * x)

fft_y = fft(y)  # fast Fourier transform

N = 1400
x = np.arange(N)               # number of frequencies
half_x = x[range(int(N / 2))]  # take half of the interval

abs_y = np.abs(fft_y)          # modulus of the complex values (two-sided spectrum)
angle_y = np.angle(fft_y)      # angle of the complex values
normalization_y = abs_y / N    # normalized (two-sided spectrum)
# by symmetry, keep only half of the interval (one-sided spectrum)
normalization_half_y = normalization_y[range(int(N / 2))]

plt.subplot(231)
plt.plot(x, y)
plt.title('原始波形')

plt.subplot(232)
def enfileiramento_processamento():
    global q_sample_pre_processado
    global CHUNK
    global my_array
    global my_array_show
    global my_array_fft
    global my_array_fft_show
    global my_array_MAX_amplitude
    global my_array_MEAN_rolling
    global my_array_MEAN_rolling_aux
    global my_array_MEAN_amplitude
    global my_array_MEAN_amplitude_array
    global my_array_MEAN_amplitude_array_aux
    global my_array_MIN_amplitude
    global my_array_MAX_REL
    global my_array_MAX_bit_delta
    global disparo_detectado
    global hora_do_disparo
    global amplitude_do_disparo
    global delta_bit_do_disparo

    my_array = 0
    my_array_show = 0
    my_array_fft = 0
    my_array_fft_show = 0
    my_array_MAX_amplitude = 0
    my_array_MEAN_rolling = 1  # rolling-average window size
    my_array_MEAN_rolling_aux = False
    my_array_MEAN_amplitude = 0
    my_array_MEAN_amplitude_array = np.empty(my_array_MEAN_rolling, dtype=float)
    my_array_MEAN_amplitude_array_aux = 0
    my_array_MIN_amplitude = 9999999999
    my_array_MAX_REL = 0
    my_array_MAX_bit_delta = 0
    disparo_detectado = False
    hora_do_disparo = datetime.time()
    amplitude_do_disparo = 0
    delta_bit_do_disparo = 0
    contador = 0

    while True:
        if q_sample_pre_processado.empty():
            if disparo_detectado and time.time() - contador >= 10:
                disparo_detectado = False
                my_array_show = 0
                my_array_fft_show = 0
            continue
        try:
            # try to fetch data from the queue
            my_array = q_sample_pre_processado.get_nowait()

            # the transform is CPU-heavy, so it is skipped here
            # my_array_fft = np.abs(fft(my_array)) * 2 / (256 * CHUNK)

            # temp_MAX = np.amax(my_array)
            temp_MAX_index = np.where(my_array == np.amax(my_array))
            temp_MAX_index_plus = len(temp_MAX_index[0])
            temp_MAX = my_array[temp_MAX_index[0][0]]
            if my_array_MAX_amplitude <= temp_MAX:
                my_array_MAX_amplitude = temp_MAX

            temp_MEAN = np.mean(my_array)
            my_array_MEAN_amplitude_array[my_array_MEAN_amplitude_array_aux] = temp_MEAN
            my_array_MEAN_amplitude_array_aux += 1
            if my_array_MEAN_amplitude_array_aux >= my_array_MEAN_rolling - 1:
                my_array_MEAN_amplitude_array_aux = 0
                my_array_MEAN_rolling_aux = True
            if my_array_MEAN_rolling_aux == True:
                my_array_MEAN_amplitude = np.around(
                    np.mean(my_array_MEAN_amplitude_array), decimals=1)

            # temp_MIN = np.amin(my_array)
            temp_MIN_index = np.where(my_array == np.amin(my_array))
            temp_MIN_index_plus = len(temp_MIN_index[0])
            temp_MIN = my_array[temp_MIN_index[0][0]]
            if my_array_MIN_amplitude >= temp_MIN:
                my_array_MIN_amplitude = temp_MIN

            temp_MAX_MEAN = abs(temp_MEAN - temp_MAX)
            temp_MIN_MEAN = abs(temp_MEAN - temp_MIN)
            if temp_MAX_MEAN >= temp_MIN_MEAN:
                temp_MAX_REL = np.around(temp_MAX_MEAN, decimals=1)
                temp_MAX_REL_index = temp_MAX_index
                temp_MAX_REL_index_plus = temp_MAX_index_plus
            else:
                temp_MAX_REL = np.around(temp_MIN_MEAN, decimals=1)
                temp_MAX_REL_index = temp_MIN_index
                temp_MAX_REL_index_plus = temp_MIN_index_plus
            if my_array_MAX_REL < temp_MAX_REL:
                my_array_MAX_REL = temp_MAX_REL

            temp_diff_array = np.diff(np.array(my_array, dtype=int))
            temp_diff_array_ABS = np.absolute(temp_diff_array)
            # temp_diff_array_ABS_MAX = np.amax(temp_diff_array_ABS)
            temp_diff_array_ABS_MAX_index = np.where(
                temp_diff_array_ABS == np.amax(temp_diff_array_ABS))
            temp_diff_array_ABS_MAX_index_plus = len(temp_diff_array_ABS_MAX_index[0])
            temp_diff_array_ABS_MAX = my_array[temp_diff_array_ABS_MAX_index[0][0]]
            if my_array_MAX_bit_delta <= temp_diff_array_ABS_MAX:
                my_array_MAX_bit_delta = temp_diff_array_ABS_MAX

            ### shot detection
            if disparo_detectado == False and temp_MAX_REL >= 83 and temp_diff_array_ABS_MAX >= 20:
                disparo_detectado = True
                hora_do_disparo = datetime.datetime.now().strftime("%H:%M:%S")
                amplitude_do_disparo = [temp_MAX_REL,
                                        temp_MAX_REL_index[0][0],
                                        temp_MAX_REL_index_plus]
                delta_bit_do_disparo = [temp_diff_array_ABS_MAX,
                                        temp_diff_array_ABS_MAX_index[0][0],
                                        temp_diff_array_ABS_MAX_index_plus]
                # the transform is CPU-heavy, so compute it only on detection
                my_array_fft_show = np.abs(fft(my_array)) * 2 / (256 * CHUNK)
                contador = time.time()
                my_array_show = my_array
                # my_array_fft_show = my_array_fft
            if disparo_detectado == False:
                my_array_show = my_array
        except Exception as e:
            print(str(e))
# Define the sample spacing and window size.
dT = 1.0 / rate
T_window = 50e-3
N_window = int(T_window * rate)
N_data = len(data)

# 1. Get the window profile
window = get_window('hamming', N_window)

# 2. Set up the FFT
result = []
start = 0
while start < N_data - N_window:
    end = start + N_window
    result.append(fftshift(fft(window * data[start:end])))
    start = end
result.append(fftshift(fft(window * data[-N_window:])))
result = array(result, result[0].dtype)

# Display results
freqscale = fftshift(fftfreq(N_window, dT))[150:-150] / 1e3
figure(1)
clf()
imshow(abs(result[:, 150:-150]),
       extent=(freqscale[-1], freqscale[0],
               (N_data * dT - T_window / 2.0), T_window / 2.0))
xlabel('Frequency (kHz)')
ylabel('Time (sec.)')
gray()
#     SDNN.append(std(win_data))
#     sum_diff = 0
#     for j in range(len(win_data) - 1):
#         diff = win_data[j + 1] - win_data[j]
#         sum_diff += diff * diff
#         diff_data.append(diff)
#     rMSSD.append(sqrt(sum_diff / len(diff_data)))
#     SDSD.append(std(diff_data))
#     return SDNN, rMSSD, SDSD

raw_data = read_data(file_path)
fftdata = raw_data - mean(raw_data)
L = len(fftdata)
temp = fft(fftdata, L)
# A = abs(temp) / (L / 2)
# A[1] = A[1] / 2
f = [i / L for i in range(L)]
# note: temp is complex; matplotlib will plot its real part and warn,
# so abs(temp) would normally be used for a magnitude spectrum
plt.plot(f[:int(L / 2)], temp[:int(L / 2)])
plt.show()

# SDNN, SDSD, rMSSD = process_data(raw_data)
# print(SDNN)
# print('\n')
# print(SDSD)
# print('\n')
# print(rMSSD)
# plt.plot(SDNN)
# plt.plot(SDSD)
plt.title('Original Frame')
plt.grid(True)

plt.subplot(2, 1, 2)
plt.plot(audio_win2[ind])
plt.title('Frame After Windowing')
plt.grid(True)

"""In the plot above, both ends of the frame end at different places on the
y axis. The window brought the edges of each frame closer to zero. Now let's
perform the FFT. We can do an N-point FFT on each frame to calculate the
frequency spectrum, which is also called the Short-Time Fourier Transform
(STFT), where N is typically 256 or 512; here NFFT = 512."""

audio_winT1 = np.transpose(audio_win1)
audio_fft1 = np.empty((int(1 + FFT_size // 2), audio_winT1.shape[1]),
                      dtype=np.complex64, order='F')
for n in range(audio_fft1.shape[1]):
    audio_fft1[:, n] = fft.fft(audio_winT1[:, n], axis=0)[:audio_fft1.shape[0]]
audio_fft1 = np.transpose(audio_fft1)

audio_winT2 = np.transpose(audio_win2)
audio_fft2 = np.empty((int(1 + FFT_size // 2), audio_winT2.shape[1]),
                      dtype=np.complex64, order='F')
for n in range(audio_fft2.shape[1]):
    audio_fft2[:, n] = fft.fft(audio_winT2[:, n], axis=0)[:audio_fft2.shape[0]]
audio_fft2 = np.transpose(audio_fft2)

"""#Calculate signal power"""
    a = int(fpstr, 16)
    fp = fixpt18tofloat(a)
    x[i] = fp  # substitute converted fixed-point 18 values using the same time base
    i = i + 1
    fpstr = fixedptfile.readline()
fixedptfile.close()

# plt.plot(t, x)                              # plot using pyplot from matplotlib
# plt.title('Sine wave f=' + str(f) + ' Hz')  # plot title
# plt.xlabel('Time (s)')                      # x-axis label
# plt.ylabel('Amplitude')                     # y-axis label
# plt.show()                                  # display the figure

NFFT = 32
y = x[0:NFFT - 1]
X = fftshift(fft(y, NFFT))

# plt.subplots(nrows=1, ncols=1)  # create figure handle
fVals = np.arange(start=-NFFT / 2, stop=NFFT / 2) * fs / NFFT
plt.plot(fVals, np.abs(X), 'b')
plt.title('Double Sided FFT - with FFTShift')
plt.xlabel('Frequency (Hz)')
plt.ylabel('|DFT Values|')
plt.xlim(-fs / 2, fs / 2)
plt.xticks(np.arange(-fs / 2, fs / 2 + 1, fs / 5))
plt.show()
from scipy import signal
from matplotlib import pyplot as plt
from matplotlib import style
import numpy as np
from scipy.fftpack import fft, fftshift

window = signal.blackmanharris(51)
plt.plot(window)
plt.title("Blackman-Harris Window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")

plt.figure()
A = fft(window, 2048) / (len(window) / 2.0)
# the frequency axis must span -0.5..0.5; the original linspace ended at -0.5
freq = np.linspace(-0.5, 0.5, len(A))
response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
plt.plot(freq, response)
plt.axis([-0.5, 0.5, -120, 0])
plt.title("Frequency response of the Blackman-Harris Window")
plt.ylabel("Normalized magnitude (dB)")
plt.xlabel("Normalized frequency (cycles/sample)")
plt.show()
def mfcc(input, nwin=256, nfft=512, fs=16000, nceps=13):
    """Compute Mel Frequency Cepstral Coefficients.

    Parameters
    ----------
    input: ndarray
        input from which the coefficients are computed

    Returns
    -------
    ceps: ndarray
        Mel-cepstrum coefficients
    mspec: ndarray
        Log-spectrum in the mel domain.

    Notes
    -----
    MFCC are computed as follows:
        * Pre-processing in time-domain (pre-emphasis)
        * Compute the spectrum amplitude by windowing with a Hamming window
        * Filter the signal in the spectral domain with a triangular
          filter-bank, whose filters are approximately linearly spaced on
          the mel scale and have equal bandwidth in the mel scale
        * Compute the DCT of the log-spectrum

    References
    ----------
    .. [1] S.B. Davis and P. Mermelstein, "Comparison of parametric
           representations for monosyllabic word recognition in continuously
           spoken sentences", IEEE Trans. Acoustics, Speech, Signal Proc.
           ASSP-28 (4): 357-366, August 1980."""

    # MFCC parameters: taken from the auditory toolbox
    over = nwin - 160
    # Pre-emphasis factor (to take into account the -6dB/octave rolloff of
    # the radiation at the lips level)
    prefac = 0.97

    #lowfreq = 400 / 3.
    lowfreq = 133.33
    #highfreq = 6855.4976
    linsc = 200 / 3.
    logsc = 1.0711703

    nlinfil = 13
    nlogfil = 27
    nfil = nlinfil + nlogfil

    w = hamming(nwin, sym=0)

    fbank = trfbank(fs, nfft, lowfreq, linsc, logsc, nlinfil, nlogfil)[0]

    #------------------
    # Compute the MFCC
    #------------------
    extract = preemp(input, prefac)
    framed = segment_axis(extract, nwin, over) * w

    # Compute the spectrum magnitude
    spec = np.abs(fft(framed, nfft, axis=-1))
    # Filter the spectrum through the triangle filterbank
    mspec = np.log10(np.dot(spec, fbank.T))
    # Use the DCT to 'compress' the coefficients (spectrum -> cepstrum domain)
    ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:, :nceps]

    return ceps, mspec, spec
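# `preemp` and `segment_axis` are external helpers not shown here.  A
# minimal sketch of the pre-emphasis step, assuming the usual first-order
# filter y[n] = x[n] - p * x[n-1] (a hypothetical stand-in, not the
# author's preemp):
import numpy as np

def preemp_sketch(x, p=0.97):
    """First-order pre-emphasis filter."""
    x = np.asarray(x, dtype=float)
    return np.append(x[0], x[1:] - p * x[:-1])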
def test_axes_argument(self):
    # plane == ji_plane, x == kji_space
    plane1 = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    plane2 = [[10, 11, 12], [13, 14, 15], [16, 17, 18]]
    plane3 = [[19, 20, 21], [22, 23, 24], [25, 26, 27]]
    ki_plane1 = [[1, 2, 3], [10, 11, 12], [19, 20, 21]]
    ki_plane2 = [[4, 5, 6], [13, 14, 15], [22, 23, 24]]
    ki_plane3 = [[7, 8, 9], [16, 17, 18], [25, 26, 27]]
    jk_plane1 = [[1, 10, 19], [4, 13, 22], [7, 16, 25]]
    jk_plane2 = [[2, 11, 20], [5, 14, 23], [8, 17, 26]]
    jk_plane3 = [[3, 12, 21], [6, 15, 24], [9, 18, 27]]
    kj_plane1 = [[1, 4, 7], [10, 13, 16], [19, 22, 25]]
    kj_plane2 = [[2, 5, 8], [11, 14, 17], [20, 23, 26]]
    kj_plane3 = [[3, 6, 9], [12, 15, 18], [21, 24, 27]]
    ij_plane1 = [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
    ij_plane2 = [[10, 13, 16], [11, 14, 17], [12, 15, 18]]
    ij_plane3 = [[19, 22, 25], [20, 23, 26], [21, 24, 27]]
    ik_plane1 = [[1, 10, 19], [2, 11, 20], [3, 12, 21]]
    ik_plane2 = [[4, 13, 22], [5, 14, 23], [6, 15, 24]]
    ik_plane3 = [[7, 16, 25], [8, 17, 26], [9, 18, 27]]
    ijk_space = [jk_plane1, jk_plane2, jk_plane3]
    ikj_space = [kj_plane1, kj_plane2, kj_plane3]
    jik_space = [ik_plane1, ik_plane2, ik_plane3]
    jki_space = [ki_plane1, ki_plane2, ki_plane3]
    kij_space = [ij_plane1, ij_plane2, ij_plane3]
    x = array([plane1, plane2, plane3])

    assert_array_almost_equal(fftn(x), fftn(x, axes=(-3, -2, -1)))  # kji_space
    assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
    assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
    y = fftn(x, axes=(2, 1, 0))  # ijk_space
    assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
    y = fftn(x, axes=(2, 0, 1))  # ikj_space
    assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
                              fftn(ikj_space))
    y = fftn(x, axes=(1, 2, 0))  # jik_space
    assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
                              fftn(jik_space))
    y = fftn(x, axes=(1, 0, 2))  # jki_space
    assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
    y = fftn(x, axes=(0, 2, 1))  # kij_space
    assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))

    y = fftn(x, axes=(-2, -1))  # ji_plane
    assert_array_almost_equal(fftn(plane1), y[0])
    assert_array_almost_equal(fftn(plane2), y[1])
    assert_array_almost_equal(fftn(plane3), y[2])
    y = fftn(x, axes=(1, 2))  # ji_plane
    assert_array_almost_equal(fftn(plane1), y[0])
    assert_array_almost_equal(fftn(plane2), y[1])
    assert_array_almost_equal(fftn(plane3), y[2])
    y = fftn(x, axes=(-3, -2))  # kj_plane
    assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
    assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
    assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
    y = fftn(x, axes=(-3, -1))  # ki_plane
    assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
    assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
    assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
    y = fftn(x, axes=(-1, -2))  # ij_plane
    assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
    assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
    assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
    y = fftn(x, axes=(-1, -3))  # ik_plane
    assert_array_almost_equal(fftn(ik_plane1), swapaxes(y[:, 0, :], -1, -2))
    assert_array_almost_equal(fftn(ik_plane2), swapaxes(y[:, 1, :], -1, -2))
    assert_array_almost_equal(fftn(ik_plane3), swapaxes(y[:, 2, :], -1, -2))
    y = fftn(x, axes=(-2, -3))  # jk_plane
    assert_array_almost_equal(fftn(jk_plane1), swapaxes(y[:, :, 0], -1, -2))
    assert_array_almost_equal(fftn(jk_plane2), swapaxes(y[:, :, 1], -1, -2))
    assert_array_almost_equal(fftn(jk_plane3), swapaxes(y[:, :, 2], -1, -2))

    y = fftn(x, axes=(-1,))  # i_line
    for i in range(3):
        for j in range(3):
            assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
    y = fftn(x, axes=(-2,))  # j_line
    for i in range(3):
        for j in range(3):
            assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
    y = fftn(x, axes=(0,))  # k_line
    for i in range(3):
        for j in range(3):
            assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])

    y = fftn(x, axes=())  # point
    assert_array_almost_equal(y, x)
def autocorrelate(y, max_size=None, axis=-1):
    """Bounded auto-correlation

    Parameters
    ----------
    y : np.ndarray
        array to autocorrelate

    max_size : int > 0 or None
        maximum correlation lag.
        If unspecified, defaults to `y.shape[axis]` (unbounded)

    axis : int
        The axis along which to autocorrelate.
        By default, the last axis (-1) is taken.

    Returns
    -------
    z : np.ndarray
        truncated autocorrelation `y*y` along the specified axis.
        If `max_size` is specified, then `z.shape[axis]` is bounded
        to `max_size`.

    Notes
    -----
    This function caches at level 20.

    Examples
    --------
    Compute full autocorrelation of y

    >>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=20, duration=10)
    >>> librosa.autocorrelate(y)
    array([ 3.226e+03,  3.217e+03, ...,  8.277e-04,  3.575e-04], dtype=float32)

    Compute onset strength auto-correlation up to 4 seconds

    >>> import matplotlib.pyplot as plt
    >>> odf = librosa.onset.onset_strength(y=y, sr=sr, hop_length=512)
    >>> ac = librosa.autocorrelate(odf, max_size=4 * sr / 512)
    >>> plt.plot(ac)
    >>> plt.title('Auto-correlation')
    >>> plt.xlabel('Lag (frames)')
    """
    if max_size is None:
        max_size = y.shape[axis]

    max_size = int(min(max_size, y.shape[axis]))

    # Compute the power spectrum along the chosen axis.
    # Pad out the signal to support full-length auto-correlation.
    powspec = np.abs(fft.fft(y, n=2 * y.shape[axis] + 1, axis=axis)) ** 2

    # Convert back to time domain
    autocorr = fft.ifft(powspec, axis=axis, overwrite_x=True)

    # Slice down to max_size
    subslice = [slice(None)] * autocorr.ndim
    subslice[axis] = slice(max_size)

    autocorr = autocorr[tuple(subslice)]

    if not np.iscomplexobj(y):
        autocorr = autocorr.real

    return autocorr
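# Sanity check (an addition, not part of the original): the FFT-based
# result should match a direct time-domain autocorrelation for small
# inputs, by the Wiener-Khinchin theorem.
import numpy as np
from scipy import fftpack as fft  # the `fft` module the function expects

y = np.random.randn(64)
ac_fft = autocorrelate(y)
ac_direct = np.correlate(y, y, mode='full')[len(y) - 1:]
print(np.allclose(ac_fft, ac_direct))  # True (up to float tolerance)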
print("output_file:",output_file) temp = pd.read_csv(input_file, sep = ',', header=None,engine = 'c') samples = temp.shape[0] timestamp = temp.shape[1] print(samples,timestamp) for i in range(samples): index = temp[0][i] y =np.array(temp[i:i+1]) y=y[0] y = y[1:] for chacter in range(len(y)): if y[chacter] == '-': y[chacter]=0 y = fft(y) y = np.abs(y) data = np.vstack((data,y)) index_array = np.vstack((index_array,index)) #print(data.shape[0],data.shape[1]) print(index_array) index_array = index_array[1:] data = data[1:] excel = np.hstack((index_array,data)) print(excel.shape[0],excel.shape[1]) csvFile = open(output_file, "w",newline='') #创建csv文件 writer = csv.writer(csvFile) #创建写的对象 writer.writerows(excel) csvFile.close()
def direct_dftn(x):
    x = asarray(x)
    for axis in range(x.ndim):
        x = fft(x, axis=axis)
    return x
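# Quick check (an addition, not part of the original): applying a 1-D FFT
# over every axis in turn is exactly what fftn computes.
import numpy as np
from numpy import asarray
from scipy.fftpack import fft, fftn

a = np.random.rand(4, 5, 6)
print(np.allclose(direct_dftn(a), fftn(a)))  # True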
for run in run_array:
    path_test = f'rawdata/run_000{run}/run000{run}_ch{channel}.txt'
    l_test = np.loadtxt(path_test)
    l_test = l_test * factor
    l_test = l_test.reshape(-1, data_test + 2)
    l_test = l_test[:, 2:]

    # move baseline
    test_ave = np.mean(l_test[:, 0:200], axis=1)
    for i in range(l_test.shape[0]):
        l_test[i:i + 1, :] = l_test[i:i + 1, :] - test_ave[i]

    # fft
    lf_test = fft(l_test)
    lf_test_real = lf_test.real
    nan_array = np.where(lf_test_real == 0)
    nan_array = np.array(nan_array)
    print(nan_array)
    lf_test_imag = lf_test.imag

    # delete rows whose FFT real part contains exact zeros
    if nan_array.shape[1] != 0:
        row = 0
        for i in range(nan_array.shape[1]):
            print("delete: ", nan_array[0, i])
            lf_test = np.delete(lf_test, nan_array[0, i] - row, 0)
            lf_test_real = np.delete(lf_test_real, nan_array[0, i] - row, 0)
            lf_test_imag = np.delete(lf_test_imag, nan_array[0, i] - row, 0)
            row = row + 1
def test_1_argument_real(self):
    x1 = np.array([1, 2, 3, 4], dtype=np.float16)
    y = fft(x1, n=4)
    assert_equal(y.dtype, np.complex64)
    assert_equal(y.shape, (4,))
    assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
def direct_hilbert(x):
    fx = fft(x)
    n = len(fx)
    w = fftfreq(n) * n
    w = 1j * sign(w)
    return ifft(w * fx)
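# Sanity check (an addition, not part of the original): this reference
# implementation should match scipy.fftpack.hilbert, which uses the same
# y_j = 1j * sign(j) * x_j convention.  An odd length avoids any
# Nyquist-bin edge case.
import numpy as np
from numpy import sign
from scipy.fftpack import fft, ifft, fftfreq, hilbert

x = np.random.randn(101)
print(np.allclose(direct_hilbert(x).real, hilbert(x)))  # True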
def stft(x, wsize, tstep=None, verbose=None):
    """STFT Short-Term Fourier Transform using a sine window.

    The transformation is designed to be a tight frame that can be
    perfectly inverted. It only returns the positive frequencies.

    Parameters
    ----------
    x : array, shape (n_signals, n_times)
        containing multi-channels signal
    wsize : int
        length of the STFT window in samples (must be a multiple of 4)
    tstep : int
        step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2)
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    X : array, shape (n_signals, wsize // 2 + 1, n_step)
        STFT coefficients for positive frequencies with
        n_step = ceil(T / tstep)

    Examples
    --------
    X = stft(x, wsize)
    X = stft(x, wsize, tstep)

    See Also
    --------
    istft
    stftfreq
    """
    if not np.isrealobj(x):
        raise ValueError("x is not a real valued array")

    if x.ndim == 1:
        x = x[None, :]

    n_signals, T = x.shape
    wsize = int(wsize)

    # Errors and warnings
    if wsize % 4:
        raise ValueError('The window length must be a multiple of 4.')

    if tstep is None:
        tstep = wsize / 2

    tstep = int(tstep)

    if (wsize % tstep) or (tstep % 2):
        raise ValueError('The step size must be a multiple of 2 and a '
                         'divider of the window length.')

    if tstep > wsize / 2:
        raise ValueError('The step size must be smaller than half the '
                         'window length.')

    n_step = int(ceil(T / float(tstep)))
    n_freq = wsize // 2 + 1
    logger.info("Number of frequencies: %d" % n_freq)
    logger.info("Number of time steps: %d" % n_step)

    # np.complex is a removed alias of the builtin; use an explicit dtype.
    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex128)
    if n_signals == 0:
        return X

    # Defining sine window
    win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
    win2 = win ** 2

    swin = np.zeros((n_step - 1) * tstep + wsize)
    for t in range(n_step):
        swin[t * tstep:t * tstep + wsize] += win2
    swin = np.sqrt(wsize * swin)

    # Zero-padding and pre-processing for edges
    xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep), dtype=x.dtype)
    xp[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T] = x
    x = xp

    for t in range(n_step):
        # Framing
        wwin = win / swin[t * tstep: t * tstep + wsize]
        frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :]

        # FFT
        fframe = fft(frame)
        X[:, :, t] = fframe[:, :n_freq]

    return X
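# A small usage sketch (an addition, not part of the original).  The
# function expects `ceil`, `fft`, and `logger` in scope; minimal stand-ins:
import logging
import numpy as np
from math import ceil
from scipy.fftpack import fft

logger = logging.getLogger(__name__)

rng = np.random.default_rng(0)
sig = rng.standard_normal((2, 1000))  # 2 signals, 1000 samples
coeffs = stft(sig, wsize=128)         # tstep defaults to wsize / 2 = 64
print(coeffs.shape)                   # (2, 65, 16): wsize // 2 + 1 freqs, ceil(1000 / 64) steps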
while len(Feature) < 5 * TaskSetting['Initiation']:
    if not Data_Queue.empty():
        Sample = Data_Queue.get()
        if not EEG_Recording.any():
            EEG_Recording = Sample
        else:
            EEG_Recording = np.append(EEG_Recording, Sample, axis=0)
        # Common Average Reference
        Sample[:, range(1, 9)] = Sample[:, range(1, 9)] - np.tile(
            np.mean(Sample[:, range(1, 9)], axis=1),
            (Sample.shape[1] - 1, 1)).T
        # Power after reference
        Power = abs(fft(Sample[:, Channel]))
        Feature.append(np.sum(Power[Frequency_Band]))

FIFO.Rewrite(Trigger_Log, "Calibration On")
Trigger = np.array([[timeit.default_timer() - EEG_Time, CALIBRATION_START]])
print("Calibration Stage 1")
Current_Index = len(Feature)

# -----------------------------------------------
# Resting state. Movement range calculation.
# -----------------------------------------------
while len(Feature) - Current_Index < 5 * TaskSetting['Calibration']:
    if not Data_Queue.empty():
        Sample = Data_Queue.get()
        Feature, EEG_Recording = FeatureExtraction(Sample, Feature, EEG_Recording)
def featurize_window(self, df_fw, bar):
    local_dict = OrderedDict()

    if self.mode > 0 and self.mode < 3:
        if df_fw.index.size >= (30 * self.window_size_in_minutes):
            df_fw.double_x = df_fw.double_x.replace({0: 1e-08})
            df_fw.double_y = df_fw.double_y.replace({0: 1e-08})
            df_fw.double_z = df_fw.double_z.replace({0: 1e-08})
            f_x = scipy.interpolate.interp1d(df_fw.timestamp, df_fw.double_x)
            f_y = scipy.interpolate.interp1d(df_fw.timestamp, df_fw.double_y)
            f_z = scipy.interpolate.interp1d(df_fw.timestamp, df_fw.double_z)
            r = (np.sqrt(df_fw.double_x**2 + df_fw.double_y**2
                         + df_fw.double_z**2)).replace({0: 1e-08})
            f_r = scipy.interpolate.interp1d(df_fw.timestamp, r)
            xnew = []
            step = (df_fw.timestamp.iloc[-1] - df_fw.timestamp.iloc[0]) / df_fw.index.size
            for ti in range(df_fw.timestamp.iloc[0], df_fw.timestamp.iloc[-1], int(step)):
                xnew.append(ti)
            f_fs = self.window_size_in_minutes * 60 / df_fw.index.size
            L = 512  # FFT window size
            local_dict.update({'skip_fft': False, 'fx': f_x(xnew), 'fy': f_y(xnew),
                               'fz': f_z(xnew), 'fr': f_r(xnew), 'fs': f_fs, 'L': L})
        else:
            local_dict.update({'skip_fft': True})
        if df_fw.index.size == 0:
            local_dict['skip_td'] = True
        else:
            local_dict['skip_td'] = False

    if self.mode == 0:
        local_dict['skip_fft'] = True
        if df_fw.index.size == 0:
            local_dict['skip_td'] = True
        else:
            local_dict['skip_td'] = False

    if self.mode == 3:
        local_dict['skip_fft'] = True
        local_dict['skip_td'] = True

    feat_dict = {}
    # window information:
    feat_dict.update({'start_timestamp': df_fw.timestamp[0]})
    feat_dict.update({'end_timestamp': df_fw.timestamp[0] + 6 * 10**3})
    feat_dict.update({'sample_count': df_fw.index.size})

    for feature in self.acc_features:
        if feature == 'int_desc':
            if not local_dict['skip_td']:
                int_desc = np.sqrt((df_fw.double_x ** 2).describe()
                                   + (df_fw.double_y ** 2).describe()
                                   + (df_fw.double_z ** 2).describe())
                feat_dict.update({'int_mean': int_desc[1], 'int_std': int_desc[2],
                                  'int_min': int_desc[3], 'int_25': int_desc[4],
                                  'int_50': int_desc[5], 'int_75': int_desc[6]})
            else:
                feat_dict.update({'int_mean': np.nan, 'int_std': np.nan,
                                  'int_min': np.nan, 'int_25': np.nan,
                                  'int_50': np.nan, 'int_75': np.nan})
        elif feature == 'int_rms':
            if not local_dict['skip_td']:
                int_rms = np.sqrt((df_fw.double_x**2).sum() + (df_fw.double_y**2).sum()
                                  + (df_fw.double_z**2).sum()) / np.sqrt(df_fw.index.size)
                feat_dict.update({'int_rms': int_rms})
            else:
                feat_dict.update({'int_rms': np.nan})
        elif feature == 'mag_desc':
            if not local_dict['skip_td']:
                mag_desc = np.sqrt(df_fw.double_x**2 + df_fw.double_y**2
                                   + df_fw.double_z**2).describe()
                feat_dict.update({'mag_mean': mag_desc[1], 'mag_std': mag_desc[2],
                                  'mag_min': mag_desc[3], 'mag_25': mag_desc[4],
                                  'mag_50': mag_desc[5], 'mag_75': mag_desc[6]})
            else:
                feat_dict.update({'mag_mean': np.nan, 'mag_std': np.nan,
                                  'mag_min': np.nan, 'mag_25': np.nan,
                                  'mag_50': np.nan, 'mag_75': np.nan})
        elif feature == 'pear_coef':
            if not local_dict['skip_td']:
                cov_matrix = np.cov(np.stack((df_fw.double_x, df_fw.double_y,
                                              df_fw.double_z), axis=0))
                pear_coef_xy = cov_matrix[0, 1] / (df_fw.double_x.std() * df_fw.double_y.std())
                pear_coef_yz = cov_matrix[1, 2] / (df_fw.double_y.std() * df_fw.double_z.std())
                pear_coef_xz = cov_matrix[0, 2] / (df_fw.double_x.std() * df_fw.double_z.std())
                feat_dict.update({'pear_coef_xy': pear_coef_xy,
                                  'pear_coef_yz': pear_coef_yz,
                                  'pear_coef_xz': pear_coef_xz})
            else:
                feat_dict.update({'pear_coef_xy': np.nan, 'pear_coef_yz': np.nan,
                                  'pear_coef_xz': np.nan})
        elif feature == 'sma':
            if not local_dict['skip_td']:
                sma = (np.abs(df_fw.double_x.to_numpy()).sum()
                       + np.abs(df_fw.double_y.to_numpy()).sum()
                       + np.abs(df_fw.double_z.to_numpy()).sum()) / df_fw.index.size
                feat_dict.update({'sma': sma})
            else:
                feat_dict.update({'sma': np.nan})
        elif feature == 'svm':
            if not local_dict['skip_td']:
                svm = np.sqrt(df_fw.double_x**2 + df_fw.double_y**2
                              + df_fw.double_z**2).sum() / df_fw.index.size
                feat_dict.update({'svm': svm})
            else:
                feat_dict.update({'svm': np.nan})
        elif feature == 'fft':
            if not local_dict['skip_fft']:
                L = local_dict['L']
                dfx = fftpack.fft(local_dict['fx'], 512)
                dfy = fftpack.fft(local_dict['fy'], 512)
                dfz = fftpack.fft(local_dict['fz'], 512)
                dfr = fftpack.fft(local_dict['fr'], 512)
                # DC component (TODO: remove the L part)
                feat_dict.update({'fdc_x': np.mean(np.real(dfx)),
                                  'fdc_y': np.mean(np.real(dfy)),
                                  'fdc_z': np.mean(np.real(dfz)),
                                  'fdc_r': np.mean(np.real(dfr))})
                # Energy
                feat_dict.update({'feng_x': np.sum(np.real(dfx)**2 + np.imag(dfx)**2) / L,
                                  'feng_y': np.sum(np.real(dfy)**2 + np.imag(dfy)**2) / L,
                                  'feng_z': np.sum(np.real(dfz)**2 + np.imag(dfz)**2) / L,
                                  'feng_r': np.sum(np.real(dfr)**2 + np.imag(dfr)**2) / L})
                # Entropy
                ck_x = np.sqrt(np.real(dfx)**2 + np.imag(dfx)**2)
                cj_x = ck_x / np.sum(ck_x)
                e_x = np.sum(cj_x * np.log(cj_x))
                ck_y = np.sqrt(np.real(dfy)**2 + np.imag(dfy)**2)
                cj_y = ck_y / np.sum(ck_y)
                e_y = np.sum(cj_y * np.log(cj_y))
                ck_z = np.sqrt(np.real(dfz)**2 + np.imag(dfz)**2)
                cj_z = ck_z / np.sum(ck_z)
                e_z = np.sum(cj_z * np.log(cj_z))
                ck_r = np.sqrt(np.real(dfr)**2 + np.imag(dfr)**2)
                cj_r = ck_r / np.sum(ck_r)
                e_r = np.sum(cj_r * np.log(cj_r))
                feat_dict.update({'fent_x': e_x, 'fent_y': e_y,
                                  'fent_z': e_z, 'fent_r': e_r})
                # Correlation (TODO: fix the length; should be the FFT window size, 512)
                fcorr_xy = np.dot(np.real(dfx) / L, np.real(dfy) / L)
                fcorr_xz = np.dot(np.real(dfx) / L, np.real(dfz) / L)
                fcorr_yz = np.dot(np.real(dfy) / L, np.real(dfz) / L)
                feat_dict.update({'fcorr_xy': fcorr_xy, 'fcorr_xz': fcorr_xz,
                                  'fcorr_yz': fcorr_yz})
            else:
                feat_dict.update({'fdc_x': np.nan, 'fdc_y': np.nan,
                                  'fdc_z': np.nan, 'fdc_r': np.nan})
                feat_dict.update({'feng_x': np.nan, 'feng_y': np.nan,
                                  'feng_z': np.nan, 'feng_r': np.nan})
                feat_dict.update({'fent_x': np.nan, 'fent_y': np.nan,
                                  'fent_z': np.nan, 'fent_r': np.nan})
                feat_dict.update({'fcorr_xy': np.nan, 'fcorr_xz': np.nan,
                                  'fcorr_yz': np.nan})
        elif feature == 'psd':
            if not local_dict['skip_fft']:
                fs = local_dict['fs']
                # boxcar window == no extra windowing in the periodogram
                psd_window = signal.get_window('boxcar', len(local_dict['fx']))
                freqs_x, pxx_denx = signal.periodogram(local_dict['fx'], window=psd_window, fs=fs)
                freqs_y, pxx_deny = signal.periodogram(local_dict['fy'], window=psd_window, fs=fs)
                freqs_z, pxx_denz = signal.periodogram(local_dict['fz'], window=psd_window, fs=fs)
                freqs_r, pxx_denr = signal.periodogram(local_dict['fr'], window=psd_window, fs=fs)
                feat_dict.update({'psd_mean_x': np.mean(pxx_denx),
                                  'psd_mean_y': np.mean(pxx_deny),
                                  'psd_mean_z': np.mean(pxx_denz),
                                  'psd_mean_r': np.mean(pxx_denr)})
                feat_dict.update({'psd_max_x': np.max(pxx_denx),
                                  'psd_max_y': np.max(pxx_deny),
                                  'psd_max_z': np.max(pxx_denz),
                                  'psd_max_r': np.max(pxx_denr)})
                freqs_05_3_x = np.argwhere((freqs_x >= 0.5) & (freqs_x <= 3))
                freqs_05_3_y = np.argwhere((freqs_y >= 0.5) & (freqs_y <= 3))
                freqs_05_3_z = np.argwhere((freqs_z >= 0.5) & (freqs_z <= 3))
                freqs_05_3_r = np.argwhere((freqs_r >= 0.5) & (freqs_r <= 3))
                # max PSD in the 0.5 - 3 Hz band; 0.0 if the band is empty
                feat_dict.update({'psd_max_x_05_3': np.max(pxx_denx[freqs_05_3_x]) if freqs_05_3_x.any() else 0.0,
                                  'psd_max_y_05_3': np.max(pxx_deny[freqs_05_3_y]) if freqs_05_3_y.any() else 0.0,
                                  'psd_max_z_05_3': np.max(pxx_denz[freqs_05_3_z]) if freqs_05_3_z.any() else 0.0,
                                  'psd_max_r_05_3': np.max(pxx_denr[freqs_05_3_r]) if freqs_05_3_r.any() else 0.0})
            else:
                feat_dict.update({'psd_mean_x': np.nan, 'psd_mean_y': np.nan,
                                  'psd_mean_z': np.nan, 'psd_mean_r': np.nan})
                feat_dict.update({'psd_max_x': np.nan, 'psd_max_y': np.nan,
                                  'psd_max_z': np.nan, 'psd_max_r': np.nan})
                feat_dict.update({'psd_max_x_05_3': np.nan, 'psd_max_y_05_3': np.nan,
                                  'psd_max_z_05_3': np.nan, 'psd_max_r_05_3': np.nan})
        elif feature == 'lmbs':
            if not local_dict['skip_td']:
                lmb_f_05_3 = np.linspace(0.5, 3, 100)
                lmb_psd_x = signal.lombscargle(df_fw.timestamp, df_fw.double_x, lmb_f_05_3, normalize=False)
                lmb_psd_y = signal.lombscargle(df_fw.timestamp, df_fw.double_y, lmb_f_05_3, normalize=False)
                lmb_psd_z = signal.lombscargle(df_fw.timestamp, df_fw.double_z, lmb_f_05_3, normalize=False)
                feat_dict.update({'lmb_psd_max_x_05_3': np.max(lmb_psd_x) if lmb_psd_x.any() else 0.0,
                                  'lmb_psd_max_y_05_3': np.max(lmb_psd_y) if lmb_psd_y.any() else 0.0,
                                  'lmb_psd_max_z_05_3': np.max(lmb_psd_z) if lmb_psd_z.any() else 0.0})
            else:
                feat_dict.update({'lmb_psd_max_x_05_3': np.nan,
                                  'lmb_psd_max_y_05_3': np.nan,
                                  'lmb_psd_max_z_05_3': np.nan})

    bar.next()
    return pd.Series(feat_dict)
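# A toy illustration (an addition, not part of the original) of the
# spectral energy and entropy features computed above, on a synthetic
# signal; the 1e-12 floor guards log against zero-magnitude bins.
import numpy as np
from scipy import fftpack

L = 512
t = np.arange(L) / 100.0                  # pretend 100 Hz sampling
sig = np.sin(2 * np.pi * 2.0 * t)         # a 2 Hz tone
df = fftpack.fft(sig, L)
energy = np.sum(np.real(df)**2 + np.imag(df)**2) / L
ck = np.sqrt(np.real(df)**2 + np.imag(df)**2)
cj = ck / np.sum(ck)
entropy = np.sum(cj * np.log(cj + 1e-12))
print(energy, entropy)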