def FFT(self):
    for o in self.object_list:
        x_list = []
        y_list = []
        # print(o.chaincode)
        print("----")
        print(o.object_name + "_1 " + str(o.point_list[0][0]) + " " + str(o.point_list[0][1]) + " 1 1")
        for d in o.chaincode:
            print(d, end=' ')
        print("-1")
        print("----")
        # Integrate the chain code into x/y contour coordinates
        x = y = 0
        for d in o.chaincode:
            x += DIRECTION_MATRIX[d][0]
            y -= DIRECTION_MATRIX[d][1]
            x_list.append(x)
            y_list.append(y)
        n_point = len(x_list)
        x_fft_result = fft.rfft(x_list)
        y_fft_result = fft.rfft(y_list)
        for i in range(20):
            x = x_fft_result[i]
            y = y_fft_result[i]
            print("%e %e %e %e" % (x.real, x.imag, y.real, y.imag))
        # Reconstruct the contour from the first 10 harmonics only
        x_list_2 = fft.irfft(x_fft_result[0:10], n_point)
        y_list_2 = fft.irfft(y_fft_result[0:10], n_point)
        print("x")
        print(x_list)
        print(x_list_2)
        print("y")
        print(y_list)
        print(y_list_2)
def fft_lowpass(nelevation, low_bound, high_bound):
    """Performs a low pass filter on the nelevation series.
    low_bound and high_bound specify the boundaries of the filter.
    """
    import numpy.fft as F
    if len(nelevation) % 2:
        result = F.rfft(nelevation, len(nelevation))
    else:
        result = F.rfft(nelevation)
    # rfftfreq matches the rfft output length (n//2 + 1), so the boolean
    # masks below line up with `result` and `factor`
    freq = F.rfftfreq(len(nelevation))
    factor = np.ones_like(result)
    factor[freq > low_bound] = 0.0
    sl = np.logical_and(high_bound < freq, freq < low_bound)
    a = factor[sl]
    # Create float array of required length and reverse
    a = np.arange(len(a) + 2).astype(float)[::-1]
    # Ramp from 1 to 0 exclusive
    a = (a / a[0])[1:-1]
    # Insert ramp into factor
    factor[sl] = a
    print(factor[sl])
    result = result * factor
    print(result)
    print('resultnog=', len(result))
    relevation = F.irfft(result, len(nelevation))
    print('result=', len(relevation))
    return relevation
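# --- Minimal usage sketch for fft_lowpass (not from the original source).
# Assumes `np` is numpy at module scope, as the function body requires, and
# that the bounds are normalized frequencies (cycles/sample) in rfftfreq's
# default units. Note the naming: low_bound is the upper passband edge and
# high_bound the lower edge of the taper region, so high_bound < low_bound.
import numpy as np

t = np.arange(1024)
series = np.sin(2 * np.pi * 0.01 * t) + 0.3 * np.sin(2 * np.pi * 0.2 * t)
# Pass everything below 0.02 cycles/sample; ramp down between 0.02 and 0.05
smoothed = fft_lowpass(series, low_bound=0.05, high_bound=0.02)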
def record2vecs(File):
    # Read the rr data (Note that it is sampled irregularly and
    # consists of the r-times in centiseconds) and return a high
    # frequency spectrogram.
    import cinc2000
    from numpy.fft import rfft, irfft
    data = []
    for line in File:
        data.append(float(line) / 100)
    File.close()
    # Now data[i] is an r-time in seconds.
    hrd = cinc2000.R_times2Dev(data)
    # hrd are heart rate deviations sampled at 2 Hz
    pad = np.zeros(((Glength + len(hrd)),), np.float64)
    pad[Gl_2:len(hrd) + Gl_2] = hrd
    # Now pad is hrd with Gl_2 zeros before and after
    N_out = int(len(hrd) // RDt)
    result = np.zeros((N_out, Fw // 2), np.float64)
    mags = []
    for k in range(N_out):
        i = int(RDt * k)
        WD = Gw * pad[i:i + Glength]  # Multiply data by window function
        FT = rfft(WD, n=Fw)
        SP = np.conjugate(FT) * FT  # Periodogram
        temp = rfft(SP, n=Fw // 2)
        # Low pass filter in freq domain.  Pass below 0.1 Nyquist
        SP = irfft(temp[0:int(0.1 * Fw / 2)], n=Fw // 2)
        temp = SP.real[:Fw // 2]
        mag = math.sqrt(np.dot(temp, temp))
        result[k, :] = temp / mag
        mags.append(math.log(mag))
        # result[k,:] is a unit vector and a smoothed periodogram
    return [result, mags]
def macro_mismatch(self, p1, p2):
    """Performs double convolution with two different periods to calculate
    macroscopic average of a charge density along the z-axis.
    """
    from numpy.fft import rfft, irfft
    # Convert periods to direct units, if given in cartesian
    if p1 > 1.0:
        p1 = p1 / self.unitcell.cell_vec[2, 2]
    if p2 > 1.0:
        p2 = p2 / self.unitcell.cell_vec[2, 2]
    # Create xy-plane averaged density
    micro_z = self.integrate_z_density()
    # Create convolutions
    z_pos = np.linspace(0, 1, len(micro_z))
    # Find index of lowest lower bound for p1
    low = 1. - p1 / 2.
    i1 = len(z_pos) - 1
    while True:
        if z_pos[i1] <= low:
            i1 += 1
            break
        i1 -= 1
    # Find index of lowest upper bound for p1
    high = p1 / 2.
    j1 = 0
    while True:
        if z_pos[j1] >= high:
            j1 -= 1
            break
        j1 += 1
    # Find index of lowest lower bound for p2
    low = 1. - p2 / 2.
    i2 = len(z_pos) - 1
    while True:
        if z_pos[i2] <= low:
            i2 += 1
            break
        i2 -= 1
    # Find index of lowest upper bound for p2
    high = p2 / 2.
    j2 = 0
    while True:
        if z_pos[j2] >= high:
            j2 -= 1
            break
        j2 += 1
    # Build the two square (top-hat) convolution kernels
    conv1 = np.zeros(len(micro_z))
    conv1[0:j1 + 1] = np.ones(j1 + 1)
    conv1[i1:] = np.ones(len(conv1[i1:]))
    conv2 = np.zeros(len(micro_z))
    conv2[0:j2 + 1] = np.ones(j2 + 1)
    conv2[i2:] = np.ones(len(conv2[i2:]))
    # Perform convolutions in Fourier space
    f_micro_z = rfft(micro_z)
    f_conv1 = rfft(conv1)
    f_conv2 = rfft(conv2)
    f_macro = f_conv2 * f_conv1 * f_micro_z
    macro_z = irfft(f_macro) / float(np.sum(conv1)) / float(np.sum(conv2))
    return macro_z
def filtered_cross_corr(signal1, signal2, bins, smoothing=10, timestep=1):
    """Get the cross-correlation between the signals, first filtering the
    fourier transforms (smoothed top-hat), chopping the fourier signals
    into "bins" bins"""
    signal1 -= signal1.mean()
    signal2 -= signal2.mean()
    x1 = rfft(signal1)
    x2 = rfft(signal2)
    assert len(x1) == len(x2)
    startfreq = arange(1, len(x1), len(x1) // (bins + 1))
    position = arange(len(x1))
    freq = fftfreq(len(x1), timestep)
    freqout = zeros(bins) * 1.
    out = zeros((bins, len(signal1))) * 1.0
    att = ones(len(x1)) * 1.
    for i in range(bins):
        # Top-hat passband for this frequency bin
        att[:startfreq[i]] = 0
        att[startfreq[i]:startfreq[i + 1]] = 1
        att[startfreq[i + 1]:] = 0
        freqout[i] = mean(freq * att[:len(freq)])
        att = smooth(att, smoothing)
        att[0] = 0
        x1dash = x1 * att
        sig1dash = irfft(x1dash, len(signal1))
        x2dash = x2 * att
        sig2dash = irfft(x2dash, len(signal2))
        out[i] = correlate(sig1dash, sig2dash, 'same')
    lag = arange(-len(x2), len(x2), 1.) * timestep
    return lag, freqout, out
def cross_correlate(histogram, template, n_harmonics=None, upsample=16):
    """Find the shift required to align histogram with template"""
    n = max(len(histogram), len(template))
    h_ft = rfft(histogram)
    t_ft = rfft(template)
    # Zero-pad the shorter spectrum so the two can be multiplied
    if len(h_ft) < len(t_ft):
        h_ft = concatenate((h_ft, zeros(len(t_ft) - len(h_ft))))
    elif len(t_ft) < len(h_ft):
        t_ft = concatenate((t_ft, zeros(len(h_ft) - len(t_ft))))
    if n_harmonics is not None:
        h_ft[n_harmonics + 1:] *= 0.
        t_ft[n_harmonics + 1:] *= 0.
    h_ft[0] = 0
    t_ft[0] = 0
    cross_correlations = irfft(conjugate(h_ft) * t_ft, n * upsample)
    shift = argmax(cross_correlations) / float(len(cross_correlations))
    assert 0 <= shift < 1
    # FIXME: warn if double-peaked
    return shift
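# --- Minimal usage sketch for cross_correlate (not from the original
# source). Assumes the bare names the function uses (rfft, irfft,
# concatenate, zeros, conjugate, argmax) resolve to numpy / numpy.fft.
import numpy as np
from numpy import concatenate, zeros, conjugate, argmax
from numpy.fft import rfft, irfft

phase = np.linspace(0, 1, 256, endpoint=False)
template = np.exp(-0.5 * ((phase - 0.3) / 0.05) ** 2)
histogram = np.roll(template, 64)            # rotate by a quarter cycle
shift = cross_correlate(histogram, template, n_harmonics=32)
print(shift)                                 # ~0.75, i.e. -0.25 modulo 1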
def generalized_cross_correlation(d0, d1):
    # subtract the means
    # (in order to get a normalized cross-correlation at the end)
    d0 -= d0.mean()
    d1 -= d1.mean()
    # Hann window to mitigate non-periodicity effects
    window = numpy.hanning(len(d0))
    # compute the cross-correlation
    D0 = rfft(d0 * window)
    D1 = rfft(d1 * window)
    D0r = D0.conjugate()
    G = D0r * D1
    # G = (G==0.)*1e-30 + (G!=0.)*G
    # W = 1.                  # frequency unweighted
    # W = 1./numpy.abs(G)     # "PHAT"
    absG = numpy.abs(G)
    m = max(absG)
    W = 1. / (1e-10 * m + absG)  # regularized PHAT weighting
    # D1r = D1.conjugate(); G0 = D0r*D0; G1 = D1r*D1; W = numpy.abs(G)/(G0*G1)  # HB weighted
    Xcorr = irfft(W * G)
    # Xcorr_unweighted = irfft(G)
    # numpy.save("d0.npy", d0)
    # numpy.save("d1.npy", d1)
    # numpy.save("Xcorr.npy", Xcorr)
    return Xcorr
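# --- Minimal usage sketch for generalized_cross_correlation (not from the
# original source). Estimates the sample delay between two noisy copies of
# the same signal; assumes numpy and numpy.fft.{rfft,irfft} are imported
# the way the function expects.
import numpy
from numpy.fft import rfft, irfft

rng = numpy.random.default_rng(0)
s = rng.standard_normal(4096)
delay = 37
d0 = s + 0.1 * rng.standard_normal(4096)
d1 = numpy.roll(s, delay) + 0.1 * rng.standard_normal(4096)
xcorr = generalized_cross_correlation(d0, d1)
# The peak index (modulo the window length) is the estimated delay
print(numpy.argmax(xcorr))  # expected: 37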
def filter_xyz(data):
    xdata = [a[0] for a in data]
    ydata = [a[1] for a in data]
    zdata = [a[2] for a in data]
    samples = len(ydata)
    xdata = array(xdata)
    ydata = array(ydata)
    zdata = array(zdata)
    try:
        # ----------------------------------
        xfft = fft.rfft(xdata)
        yfft = fft.rfft(ydata)
        zfft = fft.rfft(zdata)
        # ------------- filtering part --------------
        cutoff = samples * 2 // 3
        xfft = ffilter(xfft, cutoff)
        yfft = ffilter(yfft, cutoff)
        zfft = ffilter(zfft, cutoff)
        nxdata = fft.irfft(xfft)
        nydata = fft.irfft(yfft)
        nzdata = fft.irfft(zfft)
    except Exception:
        raise ValueError('null value')
    size = len(nxdata)
    data = [[nxdata[i], nydata[i], nzdata[i]] for i in range(size)]
    return data
def dispersion(x, t, d, fc1, fp1, fp2, fc2, alpha=1):
    from matplotlib.pyplot import ginput, plot, close, grid
    from numpy.fft import rfft, fft
    from numpy import finfo, zeros, hstack, unwrap, double, pi, angle, arctan2, imag, real
    close('all')
    eps = finfo(double).tiny
    # Convert band edges from MHz to Hz
    fc1 = 1e6 * fc1
    fp1 = 1e6 * fp1
    fp2 = 1e6 * fp2
    fc2 = 1e6 * fc2
    dt = abs(t[-1] - t[-2])
    x = x - x[0]
    plot(t, x)
    grid(True)
    # Interactively pick the two time windows containing the two echoes
    tx = ginput(4)
    close()
    x1 = x[(t >= tx[0][0]) & (t <= tx[1][0])]
    x2 = x[(t >= tx[2][0]) & (t <= tx[3][0])]
    toff = t[(t >= tx[1][0]) & (t <= tx[2][0])]
    N1 = len(x1)
    N2 = len(x2)
    N3 = len(toff)
    # Window each echo and zero-pad both to a common length, then band-pass
    x1 = hstack((x1 * tukeywin(N1, alpha), zeros(N2 + N3)))
    x2 = hstack((zeros(N1 + N3), x2 * tukeywin(N2, alpha)))
    x1 = bpfilter(x1, dt, fc1, fp1, fp2, fc2)
    x2 = bpfilter(x2, dt, fc1, fp1, fp2, fc2)
    H = -rfft(x2) / rfft(x1)
    f = freqs(len(x1), dt)
    phi = unwrap(angle(H))
    phi = phi[(f >= fp1) & (f <= fp2)]
    f = f[(f >= fp1) & (f <= fp2)]
    c = -(4. * d * 1e-3 * pi * f) / phi
    f = 1e-6 * f[(f >= fp1) & (f <= fp2)]
    return f, c, phi
def track_memory(self):
    '''
    Calculates the induced voltage energy kick to particles taking into
    account multi-turn induced voltage plus inductive impedance contribution.
    '''
    # Contribution from multi-turn induced voltage.
    self.array_memory *= np.exp(self.omegaj_array_memory *
                                self.rev_time_array[self.counter_turn])
    induced_voltage = irfft(self.array_memory +
                            rfft(self.slices.n_macroparticles, self.n_points_fft) *
                            self.sum_impedances_memory, self.n_points_fft)
    self.induced_voltage = self.coefficient * induced_voltage[:self.slices.n_slices]
    induced_voltage[self.len_array_memory:] = 0
    self.array_memory = rfft(induced_voltage, self.n_points_fft)
    # Contribution from inductive impedance
    if self.inductive_impedance_on:
        self.induced_voltage_list[self.index_inductive_impedance].induced_voltage_generation(self.beam, 'slice_frame')
        self.induced_voltage += self.induced_voltage_list[self.index_inductive_impedance].induced_voltage
    # Induced voltage energy kick to particles through linear interpolation
    libfib.linear_interp_kick(self.beam.dt.ctypes.data_as(ctypes.c_void_p),
                              self.beam.dE.ctypes.data_as(ctypes.c_void_p),
                              self.induced_voltage.ctypes.data_as(ctypes.c_void_p),
                              self.slices.bin_centers.ctypes.data_as(ctypes.c_void_p),
                              ctypes.c_uint(self.slices.n_slices),
                              ctypes.c_uint(self.beam.n_macroparticles),
                              ctypes.c_double(0.))
    # Counter update
    self.counter_turn += 1
def gaussian_convolution(data, ijk_sdev, cyclic=False, cutoff=5, task=None):
    from numpy import array, single as floatc, multiply, swapaxes
    c = array(data, floatc)
    from numpy.fft import rfft, irfft
    for axis in range(3):  # Transform one axis at a time.
        size = c.shape[axis]
        sdev = ijk_sdev[2 - axis]  # Axes i,j,k are 2,1,0.
        hw = min(size // 2, int(cutoff * sdev + 1)) if cutoff else size // 2
        nzeros = 0 if cyclic else hw  # Zero-fill for non-cyclic convolution.
        if nzeros > 0:
            # FFT performance is much better (up to 10x faster in numpy 1.2.1)
            # than other sizes.
            nzeros = efficient_fft_size(size + nzeros) - size
        g = gaussian(sdev, size + nzeros)
        g[hw:-hw] = 0
        fg = rfft(g)  # Fourier transform of 1-d gaussian.
        cs = swapaxes(c, axis, 2)  # Make axis 2 the FT axis.
        s0 = cs.shape[0]
        for p in range(s0):  # Transform one plane at a time.
            cp = cs[p, ...]
            try:
                ft = rfft(cp, n=len(g))  # Complex128 result, size n/2+1
            except ValueError as e:
                raise MemoryError(e)  # Array dimensions too large.
            multiply(ft, fg, ft)
            cp[:, :] = irfft(ft)[:, :size]  # Float64 result
            if task:
                pct = 100.0 * (axis + float(p) / s0) / 3.0
                task.updateStatus('%.0f%%' % pct)
def periodic_interp(self, data, zoomfact, window='hanning', alpha=6.0):
    """Return a periodic, windowed, sinc-interpolation of the data which
    is oversampled by a factor of 'zoomfact'.
    """
    zoomfact = int(zoomfact)
    if zoomfact < 1:
        # print("zoomfact must be >= 1.")
        return 0.0
    elif zoomfact == 1:
        return data
    newN = len(data) * zoomfact
    # Space out the data
    comb = Num.zeros((zoomfact, len(data)), dtype='d')
    comb[0] += data
    comb = Num.reshape(Num.transpose(comb), (newN,))
    # Compute the offsets
    xs = Num.zeros(newN, dtype='d')
    xs[:newN // 2 + 1] = Num.arange(newN // 2 + 1, dtype='d') / zoomfact
    xs[-newN // 2:] = xs[::-1][newN // 2 - 1:-1]
    # Calculate the sinc times window for the kernel
    if window.lower() == "kaiser":
        win = _window_function[window](xs, len(data) // 2, alpha)
    else:
        win = _window_function[window](xs, len(data) // 2)
    kernel = win * self.sinc(xs)
    # Convolve the kernel with the zero-stuffed data via the FFT
    return FFT.irfft(FFT.rfft(kernel) * FFT.rfft(comb))
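# --- Minimal usage sketch for periodic_interp (not from the original
# source). Upsamples one cycle of a sine by 8x; assumes `Num` is numpy,
# `FFT` is numpy.fft, and that `obj` is a hypothetical instance of the
# class providing sinc() and the _window_function table from this module.
import numpy as Num

n, zoom = 64, 8
cycle = Num.sin(2 * Num.pi * Num.arange(n) / n)
fine = obj.periodic_interp(cycle, zoom)   # len(fine) == n * zoom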
def fftfit_err(data_prof, tmp_prof, tau=0., scale=1.):
    """Estimates the uncertainties in the fftfit parameters."""
    tmp_fft = nf.rfft(tmp_prof)
    amp_tmp = np.absolute(tmp_fft)
    phs_tmp = np.angle(tmp_fft)
    dat_fft = nf.rfft(data_prof)
    amp_dat = np.absolute(dat_fft)
    phs_dat = np.angle(dat_fft)
    k = np.linspace(0, len(amp_dat) - 1, num=len(amp_dat))
    diff = diffprof(data_prof, tmp_prof)
    diff_rms_sq = np.sum(diff**2) / len(diff)
    # compute Equations A10 and A11 from Taylor (1992).
    var_b = diff_rms_sq / (2 * scale * np.sum(k[1:]**2 * amp_tmp[1:] * amp_dat[1:] *
                                              np.cos(phs_tmp[1:] - phs_dat[1:] +
                                                     k[1:] * (tau * 2 * np.pi))))
    sig_scale = np.sqrt(var_b)
    var_t = diff_rms_sq / (2 * np.sum(amp_dat[1:]**2))
    sig_shift = np.sqrt(var_t)
    # propagate uncertainty in scale to get uncertainty in baseline.
    sig_offset = sig_scale * amp_tmp[0] / len(data_prof)
    return sig_offset, sig_shift, sig_scale
def processScenario(m_surface, scenario, servo_id, beat, mode=0):
    tmp = []
    m_data = []
    # l = len(m_surface)
    s = len(scenario)
    ms_index = -1
    for i in range(s):  # << the augmented gestures can only be as long as the scenario length
        p_data = []
        # m_data = []
        p_data_tmp = []
        mapped = False
        r = 0
        l = len(scenario[i])
        print("length:", l)
        """if m_surface[i]['phrase'] and m_surface[i]['notes'] != []:  # If it's a (melodic) phrase...
            # just a reminder of the contents of phrase info:
            # 'phrase_start', 'phrase_end', 'phrase_length', 'contour_amplitudes', 'contour_energy',
            # 'contour_time', 'notes'
            p_data = numpy.multiply(m_surface[i]['contour_amplitudes'], m_surface[i]['contour_energy'])
            p_data_ = [converts(int(p)) for p in p_data]
            print "scenario[%d] = " % (i), scenario[i]
            print "m_surface[%d] = " % (i), m_surface[i]
            p_data_tmp = fft.ifft(numpy.add(fft.rfft(scenario[i], n=l), fft.rfft(p_data_, n=l)))
            print p_data_tmp
            if m_surface[i]['contour_energy'] != []:
                tmp.extend([accel(m_surface[i]['contour_energy'][0], servo_id)])
                tmp.extend(p_data_tmp)
            elif not m_surface[i]['phrase']:  # Otherwise it's a rest (phrase).
                tmp.extend([m_surface[i]['rest_duration']*beat])
            else:
                continue
        """
        while not mapped:
            ms_index += 1
            if (m_surface[ms_index]['phrase'] and
                    m_surface[ms_index]['phrase_length'] != 0 and
                    len(m_surface[ms_index]['contour_amplitudes']) > (l / 2)):
                print(m_surface[ms_index])
                # p_data = fft.irfft(fft.rfft(scenario[i], l)*fft.rfft(m_surface[ms_index]['contour_amplitudes'], l))
                p_data = fft.irfft(numpy.multiply(fft.rfft(scenario[i], l),
                                                  .02 * (fft.rfft(m_surface[ms_index]['contour_amplitudes'], l)[2])))
                # (numpy.multiply(m_surface[ms_index]['contour_amplitudes'], m_surface[ms_index]['contour_energy']), l)))
                mapped = True
                print("pdata:", p_data)
                m_data.append(m_surface[ms_index]['contour_amplitudes'])
                break
            elif m_surface[ms_index]['phrase'] == False:
                # print(m_surface)
                # p_data = 'rest'
                # tmp.append(m_surface[ms_index]['rest_duration'])
                r += m_surface[ms_index]['rest_duration']
                print("rest")
        tmp.append(r)
        tmp.append(p_data)
        # print(tmp)
    return tmp, m_data
def _bode(idat, odat, fs):
    ift = rfft(idat, fs)
    oft = rfft(odat, fs)
    # H1 estimator: cross-spectrum over the input auto-spectrum
    trans = (ift * conjugate(oft)) / (ift * conjugate(ift))
    freq = arange(trans.shape[0]).astype(float64) * fs / (2 * trans.shape[0])
    amp = abs(trans)
    phase = arctan2(trans.imag, trans.real)
    return (freq, amp, phase)
def my_multi_convolve(signals):
    # Full linear-convolution length of all the signals
    shape = sum(len(x) - 1 for x in signals) + 1
    fshape = _next_regular(shape)
    res = rfft(signals[0], fshape)
    for signal in signals[1:]:
        res *= rfft(signal, fshape)
    ret = irfft(res, fshape)
    return ret[:shape]
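# --- Minimal usage sketch for my_multi_convolve (not from the original
# source). `_next_regular` is assumed to round up to an FFT-friendly size;
# a next-power-of-two stand-in is defined here in case it isn't in scope.
import numpy as np
from numpy.fft import rfft, irfft

def _next_regular(n):              # stand-in: next power of two
    return 1 << (n - 1).bit_length()

a, b, c = np.array([1., 2.]), np.array([1., -1.]), np.array([0.5, 0.5])
out = my_multi_convolve([a, b, c])
ref = np.convolve(np.convolve(a, b), c)
print(np.allclose(out, ref))       # True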
def fourier_analysis(data, ptree=None):
    time = data['time']
    current = data['current']
    voltage = data['voltage']
    # inspect data
    n = len(time)  # normalization factor for fft
    assert len(current) == n
    assert len(voltage) == n
    d = time[1] - time[0]  # inverse of the sampling rate
    # check sampling spacing is the same everywhere
    for i in range(n - 1):
        assert isclose(time[i + 1] - time[i], d, atol=1e-10, rtol=1e-10)
    # truncate signals
    if ptree:
        steps_per_cycle = ptree.get_int('steps_per_cycle')
        cycles = ptree.get_int('cycles')
        ignore_cycles = ptree.get_int('ignore_cycles')
        assert cycles > ignore_cycles
        assert n == cycles * steps_per_cycle
        time = time[ignore_cycles * steps_per_cycle:]
        current = current[ignore_cycles * steps_per_cycle:]
        voltage = voltage[ignore_cycles * steps_per_cycle:]
    else:
        time = time[int(n / 2):]
        current = current[int(n / 2):]
        voltage = voltage[int(n / 2):]
    n = len(time)
    assert len(current) == n
    assert len(voltage) == n
    if not _is_power_of_two(n):
        warn(
            "(cycles-ignore_cycles)*steps_per_cycles is not a "
            "power of 2 (most efficient for the fourier analysis)",
            RuntimeWarning)
    # perform the actual fourier analysis
    fft_current = fft.rfft(current) / n
    fft_voltage = fft.rfft(voltage) / n
    fft_frequency = fft.rfftfreq(n, d)
    # find the excited harmonics
    if ptree:
        harmonics = array(ptree.get_array_int('harmonics'))
        peak_indices = harmonics * (cycles - ignore_cycles)
    else:
        mx, mn = peakdet(absolute(fft_voltage), mean(absolute(fft_current)))
        peak_indices = mx[:, 0].astype(int)
    frequency = fft_frequency[peak_indices]
    impedance = fft_voltage[peak_indices] / fft_current[peak_indices]
    return [frequency, impedance]
def rms_rfftdf(df):
    X = rfft(df['accel-x'])
    Y = rfft(df['accel-y'])
    Z = rfft(df['accel-z'])
    x_rfft = rms_rfft(X, len(df))
    y_rfft = rms_rfft(Y, len(df))
    z_rfft = rms_rfft(Z, len(df))
    return pd.DataFrame({'x': [x_rfft], 'y': [y_rfft], 'z': [z_rfft]})
def get_chan_freqs(frames):
    """Splits the given frames into left and right channel frequencies"""
    structsize = len(frames) // 2
    chunk = unpack('<' + 'h' * structsize, frames)
    # De-interleave the samples: even indices are left, odd are right
    left = [chunk[j] for j in range(0, len(chunk), 2)]
    right = [chunk[j] for j in range(1, len(chunk), 2)]
    l_freqs = rfft(left)
    r_freqs = rfft(right)
    return l_freqs, r_freqs, structsize
def H1(x, y, dt, frng, uwphase=True, nfreqs='All'):
    """Returns a frequency vector, transfer function between x and y, and
    the wrapped phase of the transfer function
    """
    from numpy import zeros, conjugate, linspace, angle, unwrap, array, arange
    from numpy.fft import rfft
    # X = rfft(x, int(1/(df*dt)))
    # Y = rfft(y, int(1/(df*dt)))
    X = rfft(x)
    Xc = conjugate(X)
    H = []
    phi = []
    f = linspace(0., 1 / (2 * dt), len(X))
    df = f[1] - f[0]
    if1 = NearestValue(f, frng[0])
    if2 = NearestValue(f, frng[1])
    if nfreqs == 'All':
        find = arange(if1, if2, 1)
    else:
        find = linspace(if1, if2, nfreqs).astype(int)
    for i in range(len(y)):
        Y = rfft(y[i])
        HH = (Y * Xc) / (X * Xc)
        pphi = angle(HH)
        if uwphase:
            pphi = unwrap(angle(HH))
        H.append(HH[find])
        phi.append(pphi[find])
    f = f[find]
    f = array(f)
    H = array(H)
    phi = array(phi)
    return f, H, phi
def to_freq(self, data_type):
    # self._data1 and self._data2 need to be defined by the object
    delta_f = 1 / (self.delta_t * self.pulses_length)
    data1_freq = rfft(self._data[self._data1])
    data2_freq = rfft(self._data[self._data2])
    freq_pulses_length = data1_freq.shape[1]
    pulses_freq = data_type(freq_pulses_length, self.pulses_nb, delta_f)
    pulses_freq._data['Valim'] = self.valim
    pulses_freq._data[self._data1] = data1_freq
    pulses_freq._data[self._data2] = data2_freq
    return pulses_freq
def cross_spectrum(signal1, signal2, timestep=1):
    """The fourier cross-spectrum, returns the amplitude and phase (lag)
    at each fourier frequency.

    If you have time-series that are not exactly co-sampled, you need to
    rebin to a common grid first (see general_rebin).
    """
    x1 = rfft(signal1)
    x2 = rfft(signal2)
    c = conj(x1) * x2
    freq = abs(fftfreq(len(signal1), timestep)[:len(x1)])
    c /= signal1.mean() * signal2.mean()
    return freq, abs(c), angle(c)
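# --- Minimal usage sketch for cross_spectrum (not from the original
# source). Two 1 Hz sinusoids a quarter cycle apart should show a phase of
# ~pi/2 at the peak frequency; assumes conj/angle/fftfreq/rfft resolve to
# numpy as the function expects. The DC offset keeps the means nonzero.
import numpy as np
from numpy import conj, angle
from numpy.fft import rfft, fftfreq

t = np.arange(0, 10, 0.01)
s1 = 1.0 + np.sin(2 * np.pi * 1.0 * t)
s2 = 1.0 + np.cos(2 * np.pi * 1.0 * t)   # leads s1 by a quarter cycle
freq, amp, phase = cross_spectrum(s1, s2, timestep=0.01)
print(phase[np.argmax(amp[1:]) + 1])     # ~ +pi/2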
def fftfit(data_prof, tmp_prof, tolerance=1e-12):
    """A function that uses the Taylor (1992) method to determine the shift
    between a template profile and a noisy profile.
    """
    tmp_fft = nf.rfft(tmp_prof)
    amp_tmp = np.absolute(tmp_fft)
    phs_tmp = np.angle(tmp_fft)
    k_tmp = np.linspace(0, len(amp_tmp) - 1, num=len(amp_tmp))
    dat_fft = nf.rfft(data_prof)
    amp_dat = np.absolute(dat_fft)
    phs_dat = np.angle(dat_fft)
    k_dat = np.linspace(0, len(amp_dat) - 1, num=len(amp_dat))
    # compute Equation A7 in Taylor (1992) for different values of index k.
    phs_array = np.linspace(0, 1, num=len(k_dat))
    A7vals = np.zeros(len(phs_array))
    count = 0
    for phs in phs_array:
        A7vals[count] = tay92_equation_A7(phs, amp_tmp[1:], amp_dat[1:],
                                          phs_tmp[1:], phs_dat[1:], k_tmp[1:])
        count += 1
    c, f_best = 0, 0
    a = (phs_array[np.where(A7vals == max(A7vals))])[0]
    b = (phs_array[np.where(A7vals == min(A7vals))])[0]
    # if shift is ~0, then the quantity (a - b) is negative.
    # this messes up the following method, so rotate b backwards by one cycle.
    if (a - b) < 0.:
        b -= 1.
    # bisection for finding the zero of Equation A7.
    while np.fabs(b - a) > tolerance:
        c = (a + b) / 2
        f_best = tay92_equation_A7(c, amp_tmp[1:], amp_dat[1:],
                                   phs_tmp[1:], phs_dat[1:], k_tmp[1:])
        if f_best < 0.:
            b = c
        else:
            a = c
    best_shift = c
    best_scale = tay92_equation_A9(best_shift, amp_tmp[1:], amp_dat[1:],
                                   phs_tmp[1:], phs_dat[1:], k_tmp[1:])
    # compute relative offset of profile baselines.
    best_offset = (amp_dat[0] - best_scale * amp_tmp[0]) / len(data_prof)
    return best_offset, best_shift, best_scale
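# --- Minimal usage sketch for fftfit / fftfit_err (not from the original
# source). Assumes `nf` is numpy.fft and that this module's helpers
# (diffprof, tay92_equation_A7, tay92_equation_A9) are available.
import numpy as np
import numpy.fft as nf

phase = np.linspace(0, 1, 512, endpoint=False)
template = np.exp(-0.5 * ((phase - 0.5) / 0.02) ** 2)
profile = 3.0 * np.roll(template, 25) + 0.01 * np.random.randn(512)

offset, shift, scale = fftfit(profile, template)
sig_off, sig_shift, sig_scale = fftfit_err(profile, template,
                                           tau=shift, scale=scale)
# The recovered shift should match 25/512 up to the module's sign convention
print(shift, 25 / 512.)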
def single_step_propagation(self):
    """
    Perform single step propagation. The final Wigner functions are not normalized.
    """
    ################ p x -> theta x ################
    self.wigner_ge = fftpack.fft(self.wigner_ge, axis=0, overwrite_x=True)
    self.wigner_g = fftpack.fft(self.wigner_g, axis=0, overwrite_x=True)
    self.wigner_e = fftpack.fft(self.wigner_e, axis=0, overwrite_x=True)

    # Construct T matrices
    TgL, TgeL, TeL = self.get_T_left(self.t)
    TgR, TgeR, TeR = self.get_T_right(self.t)

    # Save previous version of the Wigner function
    Wg, Wge, We = self.wigner_g, self.wigner_ge, self.wigner_e

    # First update the complex valued off diagonal wigner function
    self.wigner_ge = (TgL*Wg + TgeL*Wge.conj())*TgeR + (TgL*Wge + TgeL*We)*TeR

    # Slice arrays to employ the symmetry (savings in speed)
    TgL, TgeL, TeL = self.theta_slice(TgL, TgeL, TeL)
    TgR, TgeR, TeR = self.theta_slice(TgR, TgeR, TeR)
    Wg, Wge, We = self.theta_slice(Wg, Wge, We)

    # Calculate the remaining real valued Wigner functions
    self.wigner_g = (TgL*Wg + TgeL*Wge.conj())*TgR + (TgL*Wge + TgeL*We)*TgeR
    self.wigner_e = (TgeL*Wg + TeL*Wge.conj())*TgeR + (TgeL*Wge + TeL*We)*TeR

    ################ Apply the phase factor ################
    self.wigner_ge *= self.expV
    self.wigner_g *= self.expV[:(1 + self.P_gridDIM//2), :]
    self.wigner_e *= self.expV[:(1 + self.P_gridDIM//2), :]

    ################ theta x -> p x ################
    self.wigner_ge = fftpack.ifft(self.wigner_ge, axis=0, overwrite_x=True)
    self.wigner_g = fft.irfft(self.wigner_g, axis=0)
    self.wigner_e = fft.irfft(self.wigner_e, axis=0)

    ################ p x -> p lambda ################
    self.wigner_ge = fftpack.fft(self.wigner_ge, axis=1, overwrite_x=True)
    self.wigner_g = fft.rfft(self.wigner_g, axis=1)
    self.wigner_e = fft.rfft(self.wigner_e, axis=1)

    ################ Apply the phase factor ################
    self.wigner_ge *= self.expK
    self.wigner_g *= self.expK[:, :(1 + self.X_gridDIM//2)]
    self.wigner_e *= self.expK[:, :(1 + self.X_gridDIM//2)]

    ################ p lambda -> p x ################
    self.wigner_ge = fftpack.ifft(self.wigner_ge, axis=1, overwrite_x=True)
    self.wigner_g = fft.irfft(self.wigner_g, axis=1)
    self.wigner_e = fft.irfft(self.wigner_e, axis=1)
def corr_FXt(x0, x1, fft_window_size=32768, search_range=None, search_avg=1):
    """Do FX cross-correlation by subdividing time-series into FFT windows.

    Optionally perform a search over multiple FFT window offsets.

    Arguments:
    ----------
    x0,x1 -- Time-domain signals.
    fft_window_size -- The number of samples to take in an FFT window.
    search_range -- Range of FFT window offsets to search for cross-
        correlation, or None to not do search (default is None).
    search_avg -- Number of windows over which to average when doing a
        search. If search is not performed this parameter has no impact
        (default is 1).

    Returns:
    --------
    s_0x1 -- Time-domain cross-correlation of two signals. If search is
        done this is two-dimensional, with relative window offset along
        the zeroth dimension.
    S_0x1 -- Cross-power spectrum of two signals. If search is done this
        is two-dimensional, with relative window offset along the zeroth
        dimension.
    s_peaks -- If search is done, this returns the peak in the cross-
        correlation as a function of relative window offset.

    Notes:
    ------
    The search is done from the center window in x0, and over the search
    range, relative to that window, in x1.

    Based on cross_corr.py by Andre Young.

    Changes:
    --------
    Uses rfft and assumes a real time-domain signal. Removes phase.
    """
    N_samples = min(2**int(floor(log2(x0.size))), 2**int(floor(log2(x1.size))))
    X0 = rfft(x0[:N_samples].reshape((N_samples // fft_window_size, fft_window_size)), axis=1)
    X1 = rfft(x1[:N_samples].reshape((N_samples // fft_window_size, fft_window_size)), axis=1)
    if search_range is None:
        s_peaks = None
        s_0x1, S_0x1 = corr_Xt(X0, X1, fft_window_size=fft_window_size)
    else:
        # do search
        s_0x1, S_0x1, s_peaks = corr_Xt_search(X0, X1, fft_window_size=fft_window_size,
                                               search_range=search_range, search_avg=search_avg)
    return s_0x1, S_0x1, s_peaks
def output(self):
    """ """
    # One dimension
    if len(self._source.shape) == 1:
        source = self._actual_source
        # Use FFT convolution
        if self._fft:
            if not self._toric:
                P = rfft(source, self._fft_shape[0]) * self._fft_weights
                R = irfft(P, self._fft_shape[0]).real
                R = R[self._fft_indices]
            else:
                P = rfft(source) * self._fft_weights
                R = irfft(P, source.shape[0]).real
            # if self._toric:
            #     R = ifft(fft(source)*self._fft_weights).real
            # else:
            #     n = source.shape[0]
            #     self._src_holder[n//2:n//2+n] = source
            #     R = ifft(fft(self._src_holder)*self._fft_weights)
            #     R = R.real[n//2:n//2+n]
        # Use regular convolution
        else:
            R = convolve1d(source, self._weights[::-1], self._toric)
        if self._src_rows is not None:
            R = R[self._src_rows]
        return R.reshape(self._target.shape)
    # Two dimensions
    else:
        source = self._actual_source
        # Use FFT convolution
        if self._fft:
            if not self._toric:
                P = rfft2(source, self._fft_shape) * self._fft_weights
                R = irfft2(P, self._fft_shape).real
                R = R[self._fft_indices]
            else:
                P = rfft2(source) * self._fft_weights
                R = irfft2(P, source.shape).real
        # Use SVD convolution
        else:
            R = convolve2d(source, self._weights, self._USV, self._toric)
        if self._src_rows is not None and self._src_cols is not None:
            R = R[self._src_rows, self._src_cols]
        return R.reshape(self._target.shape)
def dft_pitch(sig, size=2048, hop=None):
    for blk in Stream(sig).blocks(size=size, hop=hop):
        dft_data = rfft(blk)
        idx, vmax = max(enumerate(dft_data),
                        key=lambda el: abs(el[1]) / (2 * el[0] / size + 1))
        yield 2 * pi * idx / size
def autocorr(self, x):
    """ multi-dimensional autocorrelation with FFT """
    X = rfft(x, n=(x.shape[1] * 2 - 1), axis=1)
    xr = irfft(X * X.conjugate(), axis=1).real
    xr = fftshift(xr, axes=1)
    xr = xr.sum(axis=1)
    return xr
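# --- Minimal sketch of the same FFT autocorrelation trick outside the
# class (not from the original source): pad to 2N-1 so the circular
# correlation equals the linear one, then check that the zero lag equals
# the signal energy. The irfft length is passed explicitly here.
import numpy as np
from numpy.fft import rfft, irfft, fftshift

x = np.random.randn(1, 64)
n_full = x.shape[1] * 2 - 1
X = rfft(x, n=n_full, axis=1)
ac = fftshift(irfft(X * X.conjugate(), n=n_full, axis=1).real, axes=1)
print(np.isclose(ac[0, n_full // 2], np.dot(x[0], x[0])))  # lag 0 == ||x||^2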
def getPSD(self):
    if self.offset + self.chunk > self.data.size:
        print('end of file')
        return False
    spectrum = rfft(self.data[self.offset:self.offset + self.chunk], n=512)
    self.offset += self.sliding
    # Power in the first 256 bins, normalized by the FFT half-length
    return (np.abs(spectrum[:256])**2) / 256
def _fft(self, audio_data):
    from numpy import fft
    amp = fft.rfft(audio_data)
    freq = fft.fftfreq(audio_data.shape[-1])[:len(amp)]
    return freq, amp.real
plt.subplot(2, 2, 2)
plt.plot(peaks2, x[peaks2], "ob")
plt.plot(x)
plt.legend(['prominence'])

plt.subplot(2, 2, 3)
plt.plot(peaks3, x[peaks3], "vg")
plt.plot(x)
plt.legend(['width'])

plt.subplot(2, 2, 4)
plt.plot(peaks4, x[peaks4], "xk")
plt.plot(x)
plt.legend(['threshold'])
plt.show()

#%% Draw spectrum
spectrum = rfft(measurements)  # forward one-dimensional DFT (for real-valued signals)
plt.plot(60 * rfftfreq(len(measurements), 1. / (len(measurements) / fd)),
         np_abs(spectrum) / len(measurements))
plt.title('Pulse wave spectrum')
plt.ylim([0, 0.005])
# plt.xlim([0, 200])
plt.grid()
plt.show()

#%% Draw spectrum with peaks
spectrum = rfft(measurements)  # forward one-dimensional DFT (for real-valued signals)
freq = rfftfreq(len(measurements), 1. / (len(measurements) / fd))
ampl = np_abs(spectrum) / len(measurements)
def transform(self, X):
    """Transform data"""
    return np.abs(rfft(X)[:, self.idx])
import matplotlib.pyplot as plt
from math import sin, pi, ceil, fabs
from numpy import array, arange, abs as np_abs
from numpy.fft import rfft, rfftfreq
import numpy as np

f = 1000
fd = 44100
fnotu = 147
N = 100000
v = 10**2
garmoniks = [v * sin(2 * pi * f * t / fd) for t in range(N)]
spectrum = rfft(garmoniks)
x = [i / fd for i in range(N)]
# Work with magnitudes: complex values cannot be ordered directly
mags = np_abs(spectrum)
Am = max(mags)
df = fd / N
for i in range(len(rfftfreq(N, 1 / fd))):
    if mags[i] == Am:
        Fm = rfftfreq(N, 1 / fd)[i]
fdn = fabs(Fm - fnotu)
f = open('results.txt', 'w')
f.write("\nNeighborhood of the real maximum frequency fm = " + str(ceil(Fm * 100) / 100) +
        " = (" + str(ceil((Fm - df) * 100) / 100) + ";" + str(ceil((Fm + df) * 100) / 100) + ").")
f.write("\nThe maximum frequency of the guitar note tone is = " + str(fnotu) + ".")
f.write("\nThe real difference is = +/-" + str(ceil(fdn * 100) / 100) +
        ", theoretical = +/-" + str(df) + ".\n")
f.close()
plt.title("Noise") plt.subplot(3,1,2) plt.plot(Ts*arange(0,len(T_column)), T_column) plt.xlim(0, Ts*len(T_column)) if rec_type == 'acceleration': if USETEX: plt.ylabel('[$\\textrm{mm}^2/\\textrm{s}^4$]') else: plt.ylabel('[mm/s4]') elif rec_type == 'velocity': if USETEX: plt.ylabel('[$\\textrm{mm}^2/\\textrm{s}^2$]') else: plt.ylabel('[mm/s2]') else: plt.ylabel('[?]') plt.xlabel('Lag [s]') plt.title("Autocovariance") plt.subplot(3,1,3) plt.plot(rfftfreq(K, Ts), Ts*np.abs(rfft(T_column))) plt.xlim(0,rfftfreq(K, Ts)[-1]) plt.yscale('log') if rec_type == 'acceleration': if USETEX: plt.ylabel('[$\\textrm{mm}/\\textrm{s}$]') else: plt.ylabel('[mm/s]') elif rec_type == 'velocity': if USETEX: plt.ylabel('[$\\textrm{mm}$]') else: plt.ylabel('[mm]') else: plt.ylabel('[?]') plt.xlabel('Frequency [Hz]') plt.title("Power spectral density") plt.tight_layout() plt.show()
def getSprectumDOS(self):
    return rfft(self.VACF, 2 * len(self.VACF) - 1, 0).real * 2 * self.dt * self.fact
import easyplot as ep
from numpy import pi, linspace, cos, sin, fft

freq1, freq2 = 5, 25
ampl = 2.0
bins = 200
dt = 0.01
duration = bins * dt
timeseries = linspace(0, duration, bins)
frequency = linspace(0, 1. / dt / 2, bins // 2)
signal = ampl * sin(2 * pi * freq1 * timeseries) + 5 * sin(2 * pi * freq2 * timeseries)
amplitudes = fft.rfft(signal, bins - 1)
ep.easyplot(frequency, 2 * abs(amplitudes) / bins, 'Hz', 'Amplitudes',
            save_as='recordset/sinewave', xlim=[0, 30])
from numpy import fft

n = int(input())
A = list(map(int, input().split()))
a = [x for x in A if x != 0]
mod = 200003
pr = 2
# Build discrete-log tables for the multiplicative group mod 200003
g = [0] * mod
log = [0] * mod
exp_pr = 1
for i in range(mod - 1):
    g[i] = exp_pr
    log[exp_pr] = i
    exp_pr = exp_pr * pr % mod
D = [0] * mod
ans = 0
for x in a:
    D[log[x]] += 1
    ans -= x * x % mod
# Convolve the exponent histogram with itself via a real FFT
l = 1 << ((mod - 1).bit_length() + 1)
fa = fft.rfft(D, l)
E = (fft.irfft(fa * fa)[:2 * mod - 2] + 0.1).astype(int)
for k in range(mod - 1):
    ans += g[k] * (E[k] + E[mod - 1 + k])
print(ans // 2)
def J_k_tensor(self, P, X, P_window=None, C_window=None):
    pf, p, nu1, nu2, g_m, g_n, h_l = X
    if self.low_extrap is not None:
        P = self.EK.extrap_P_low(P)
    if self.high_extrap is not None:
        P = self.EK.extrap_P_high(P)
    A_out = np.zeros((pf.size, self.k_size))
    P_fin = np.zeros(self.k_size)
    for i in range(pf.size):
        P_b1 = P * self.k_old**(-nu1[i])
        P_b2 = P * self.k_old**(-nu2[i])
        if P_window is not None:
            # window the input power spectrum, so that at high and low k
            # the signal smoothly tapers to zero. This makes the input
            # more like a periodic signal
            if self.verbose:
                print('windowing biased power spectrum')
            W = p_window(self.k_old, P_window[0], P_window[1])
            P_b1 = P_b1 * W
            P_b2 = P_b2 * W
        if self.n_pad != 0:
            P_b1 = np.pad(P_b1, pad_width=(self.n_pad, self.n_pad),
                          mode='constant', constant_values=0)
            P_b2 = np.pad(P_b2, pad_width=(self.n_pad, self.n_pad),
                          mode='constant', constant_values=0)
        c_m_positive = rfft(P_b1)
        c_n_positive = rfft(P_b2)
        c_m_negative = np.conjugate(c_m_positive[1:])
        c_n_negative = np.conjugate(c_n_positive[1:])
        c_m = np.hstack((c_m_negative[::-1], c_m_positive)) / float(self.N)
        c_n = np.hstack((c_n_negative[::-1], c_n_positive)) / float(self.N)
        if C_window is not None:
            # window the Fourier coefficients.
            # This will damp the highest frequencies
            if self.verbose:
                print('windowing the Fourier coefficients')
            c_m = c_m * c_window(self.m, int(C_window * self.N / 2.))
            c_n = c_n * c_window(self.m, int(C_window * self.N / 2.))
        # convolve f_c and g_c
        C_l = fftconvolve(c_m * g_m[i, :], c_n * g_n[i, :])
        # C_l=convolve(c_m*self.g_m[i,:],c_m*self.g_n[i,:])
        # multiply all l terms together
        # C_l=C_l*self.h_l[i,:]*self.two_part_l[i]
        C_l = C_l * h_l[i, :]
        # set up to feed ifft an array ordered with l=0,1,...,-1,...,N/2-1
        c_plus = C_l[self.l >= 0]
        c_minus = C_l[self.l < 0]
        C_l = np.hstack((c_plus[:-1], c_minus))
        A_k = ifft(C_l) * C_l.size  # multiply by size to get rid of the normalization in ifft
        A_out[i, :] = np.real(A_k[::2]) * pf[i] * self.k**(p[i])
        # note that you have to take every other element
        # in A_k, due to the extended array created from the
        # discrete convolution
        P_fin += A_out[i, :]
    # P_out=irfft(c_m[self.m>=0])*self.k**self.nu*float(self.N)
    if self.n_pad != 0:
        # get rid of the elements created from padding
        # P_out=P_out[self.id_pad]
        A_out = A_out[:, self.id_pad]
        P_fin = P_fin[self.id_pad]
    return P_fin, A_out
def J_k_scalar(self, P_in, X, nu, P_window=None, C_window=None):
    pf, p, g_m, g_n, two_part_l, h_l = X
    if self.low_extrap is not None:
        P_in = self.EK.extrap_P_low(P_in)
    if self.high_extrap is not None:
        P_in = self.EK.extrap_P_high(P_in)
    P_b = P_in * self.k_old**(-nu)
    if self.n_pad is not None:
        P_b = np.pad(P_b, pad_width=(self.n_pad, self.n_pad),
                     mode='constant', constant_values=0)
    c_m_positive = rfft(P_b)
    # We always filter the Fourier coefficients, so the last element is zero.
    # But in case someone does not filter, divide the end point by two
    c_m_positive[-1] = c_m_positive[-1] / 2.
    c_m_negative = np.conjugate(c_m_positive[1:])
    c_m = np.hstack((c_m_negative[::-1], c_m_positive)) / float(self.N)
    if C_window is not None:
        # Window the Fourier coefficients.
        # This will damp the highest frequencies
        if self.verbose:
            print('windowing the Fourier coefficients')
        c_m = c_m * c_window(self.m, int(C_window * self.N // 2.))
    A_out = np.zeros((pf.shape[0], self.k_size))
    for i in range(pf.shape[0]):
        # convolve f_c and g_c
        # C_l=np.convolve(c_m*self.g_m[i,:],c_m*self.g_n[i,:])
        C_l = fftconvolve(c_m * g_m[i, :], c_m * g_n[i, :])
        # multiply all l terms together
        C_l = C_l * h_l[i, :] * two_part_l[i]
        # set up to feed ifft an array ordered with l=0,1,...,-1,...,N/2-1
        c_plus = C_l[self.l >= 0]
        c_minus = C_l[self.l < 0]
        C_l = np.hstack((c_plus[:-1], c_minus))
        A_k = ifft(C_l) * C_l.size  # multiply by size to get rid of the normalization in ifft
        A_out[i, :] = np.real(A_k[::2]) * pf[i] * self.k**(-p[i] - 2)
        # note that you have to take every other element
        # in A_k, due to the extended array created from the
        # discrete convolution
    P_out = irfft(c_m[self.m >= 0]) * self.k**nu * float(self.N)
    if self.n_pad is not None:
        # get rid of the elements created from padding
        P_out = P_out[self.id_pad]
        A_out = A_out[:, self.id_pad]
    return P_out, A_out
def update_nmda_sum():
    fft_s_NMDA = rfft(excit_pop.s_NMDA)
    fft_s_NMDA_total = numpy.multiply(fft_presyn_weight_kernel, fft_s_NMDA)
    s_NMDA_tot = irfft(fft_s_NMDA_total)
    excit_pop.s_NMDA_total_ = s_NMDA_tot
assert len(t_source) == par_file.nt  # Check that len(t_source) ~ par_file.nt

############### Down sample the signal ###############
t_source = t_source[::NdownSampled]
aS = aS[::NdownSampled]
if DEBUG_PLOT:
    plt.plot(t_source, aS, '+-g', label='Filtered, downsampled source')

########### Zero pad up to next power of 2 ###########
t_source, aS = zeroPad(t_source, aS, N2 - Ndown)  # Zero-pad the source
if DEBUG_PLOT:
    plt.plot(t_source, aS, '+-k', label='Filtered, downsampled, zero-padded source')
    plt.legend()

############# Compute amplitude spectrum #############
Sf = abs(rfft(aS, N2) / Ndown)  # **2
freq_source = rfftfreq(N2, d=par_file.dt * NdownSampled)
if DEBUG_PLOT:
    plt.figure()
    plt.plot(freq_source, Sf, 'o-')
    plt.title('Source spectrum')

##### Interpolate to calculate amplitude at freq #####
idxInf = int(np.floor(freq / (freq_source[1] - freq_source[0])))
idxSup = int(idxInf + 1)
ref_amplitude = ((Sf[idxSup] - Sf[idxInf]) / (freq_source[idxSup] - freq_source[idxInf])) * freq \
    + (Sf[idxSup] * freq_source[idxInf] - Sf[idxInf] * freq_source[idxSup]) / (freq_source[idxInf] - freq_source[idxSup])
if args.verbose:
    print("Calculated reference amplitude at", freq, 'Hz:', ref_amplitude)
if DEBUG_PLOT:
    plt.show()
aux.append(kurtosis(X[i]))
# Autocorrelation
acf = correlate(X[i], X[i], 'full')[-len(X[0]):]
aut = []
for j in range(acf.size):
    if j % 8 == 0:
        aut.append(acf[j])
while len(aut) < 10:
    tmp = acf[X[i].size - (10 - len(aut))]
    aut.append(tmp)
aux.extend(aut)
# Discrete Fourier transform
fourier = fft.rfft(X[i] - X[i].mean())
freq = fft.rfftfreq(X[i].size, d=1. / 15)
inflection = diff(sign(diff(fourier)))
peaks = (inflection < 0).nonzero()[0] + 1
# first 5 peaks of the Fourier transform
peak = fourier[peaks]
# frequencies of the peaks of the discrete Fourier transform
signal_freq = freq[peaks]
aux.extend(peak[:5])
aux.extend(signal_freq[:5])
row.extend(aux)
if i == X.shape[1] - 4:
def fourier(window):
    # Squared magnitude spectrum, scaled as a one-sided periodogram
    f = np.abs(rfft(window))**2
    n = len(window)
    return (2.0 / n) * f[:n // 2]
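# --- Minimal usage sketch for fourier (not from the original source).
# A pure tone at bin 8 should dominate the returned spectrum; assumes
# `np` is numpy and `rfft` is numpy.fft.rfft at module scope.
import numpy as np
from numpy.fft import rfft

n = 128
tone = np.sin(2 * np.pi * 8 * np.arange(n) / n)
spec = fourier(tone)
print(np.argmax(spec))  # 8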
FILENAME_SIGNAL_NOISE = "5_signal_noise.csv"

# Arrays
ar_signal_noise, ar_signal, ar_noise = readCSVFile(FILENAME_SIGNAL_NOISE)
dli = math.log2(len(ar_signal))
N = 2**math.floor(dli) - 1
print("FFT length {}".format(N))
ar_signal = ar_signal[:N]
ar_noise = ar_noise[:N]
ar_signal_noise = ar_signal_noise[:N]
FD = 100000
spectrum_signal = rfft(ar_signal)
spectrum_noise = rfft(ar_noise)
spectrum_signal_noise = rfft(ar_signal_noise)
formatterAmplitude = ticker.StrMethodFormatter("{x:,g} V")
formatterTime = ticker.StrMethodFormatter("{x:,g} Hz")
freq = rfftfreq(N, 1. / FD)
ffreq = fftfreq(N, 1. / FD)
abs_spectrum_signal = abs(spectrum_signal) / N
# abs_spectrum_signal[0] = 0
abs_spectrum_noise = abs(spectrum_noise) / N
# abs_spectrum_noise[0] = 0
abs_spectrum_signal_noise = abs(spectrum_signal_noise) / N
def stft(x, n_fft=2048, hop_length=None, win_length=None, window="hann",
         center=True, pad_mode="reflect"):
    y = x
    input_rank = len(y.shape)
    if input_rank == 2:
        assert y.shape[0] == 1  # Only 1d input supported in librosa
        y = y.squeeze(0)
    dtype = None

    # By default, use the entire frame
    if win_length is None:
        win_length = n_fft

    # Set the default hop, if it's not already specified
    if hop_length is None:
        hop_length = int(win_length // 4)

    fft_window = get_window(window, win_length, fftbins=True)

    # Pad the window out to n_fft size
    fft_window = pad_center(fft_window, n_fft)

    # Reshape so that the window can be broadcast
    fft_window = fft_window.reshape((-1, 1))

    # Pad the time series so that frames are centered
    if center:
        if n_fft > y.shape[-1]:
            print("n_fft={} is too small for input signal of length={}".format(
                n_fft, y.shape[-1]))
        y = np.pad(y, int(n_fft // 2), mode=pad_mode)
    elif n_fft > y.shape[-1]:
        raise Exception(
            "n_fft={} is too large for input signal of length={}".format(
                n_fft, y.shape[-1]))

    # Window the time series.
    y_frames = frame(y, frame_length=n_fft, hop_length=hop_length)

    if dtype is None:
        dtype = dtype_r2c(y.dtype)

    # Pre-allocate the STFT matrix
    stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]),
                           dtype=dtype, order="F")

    # how many columns can we fit within MAX_MEM_BLOCK?
    n_columns = MAX_MEM_BLOCK // (stft_matrix.shape[0] * stft_matrix.itemsize)
    n_columns = max(n_columns, 1)

    for bl_s in range(0, stft_matrix.shape[1], n_columns):
        bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
        stft_matrix[:, bl_s:bl_t] = fft.rfft(fft_window * y_frames[:, bl_s:bl_t], axis=0)

    if input_rank == 2:
        stft_matrix = np.expand_dims(stft_matrix, 0)
    return stft_matrix
def beam_spectrum_generation(self, n_sampling_fft=None):
    """Beam spectrum calculation"""
    self._beam_spectrum = fft.rfft(self.profile_array, n_sampling_fft)
def _fft(x, s_freq, detrend='linear', taper=None, output='spectraldensity',
         sides='one', scaling='power', halfbandwidth=4, NW=None, n_fft=None):
    """Core function taking care of computing the power spectrum / power
    spectral density or the complex representation.

    Parameters
    ----------
    x : 1d or 2d numpy array
        input data (fft will be computed on the last dimension)
    s_freq : int
        sampling frequency
    detrend : str
        None (no detrending), 'constant' (remove mean), 'linear' (remove
        linear trend)
    output : str
        'spectraldensity' (= 'psd' in scipy) or 'complex' (for complex output)
    sides : str
        'one' or 'two', where 'two' implies negative frequencies
    scaling : str
        'power' (= 'density' in scipy, units: uV ** 2 / Hz), 'energy'
        (= 'spectrum' in scipy, units: uV ** 2), 'fieldtrip', 'chronux'
    taper : str
        Taper to use, commonly used tapers are 'boxcar', 'hann', 'dpss'
        (see below)
    halfbandwidth : int
        (only if taper='dpss') Half bandwidth (in Hz), frequency smoothing
        will be from +halfbandwidth to -halfbandwidth
    NW : int
        (only if taper='dpss') Normalized half bandwidth
        (NW = halfbandwidth * dur). Number of DPSS tapers is 2 * NW - 1.
        If specified, NW takes precedence over halfbandwidth
    n_fft : int
        Length of FFT, in samples. If less than input axis, input is
        cropped. If longer than input axis, input is padded with zeros.
        If None, FFT length set to axis length.

    Returns
    -------
    freqs : 1d ndarray
        vector with frequencies at which the PSD / ESD / complex fourier
        was computed
    result : ndarray
        PSD / ESD / complex fourier. It has the same number of dim as the
        input. Frequency transform is computed on the last dimension. If
        output='complex', there is one additional dimension with the
        taper(s).

    Notes
    -----
    The nomenclature of the frequency-domain analysis is not very
    consistent across packages / toolboxes. The convention used here is
    based on `wikipedia`_.

    So, you can have the spectral density (called sometimes power
    spectrum) or a complex output. Conceptually quite different but they
    can both be computed using the fft algorithm, so we do both here.

    Regarding the spectral density, you can have the power spectral
    density (PSD) or the energy spectral density (ESD). PSD should be used
    for stationary signals (gamma activity), while ESD should be used for
    signals that have a clear beginning and end (spindles). ESD gives the
    energy over the whole duration of the input window, while PSD is
    normalized by the window length.

    Parseval's theorem says that the energy of the signal in the
    time-domain must be equal to the energy in the frequency domain. All
    the tapers are correct to comply with this theorem (see
    tests/test_trans_frequency.py for all the examples). Note that
    packages such as 'FieldTrip' and 'Chronux' do not usually respect this
    convention (and use some ad-hoc convention). You can use the scaling
    of these packages to compare the results with those matlab toolboxes,
    but note that the results probably don't satisfy Parseval's theorem.

    Note that scipy.signal is not consistent with these names, but the
    formulas are the same. Also, scipy (v1.1 at least) does not handle
    dpss.

    Finally, the complex output has an additional dimension (taper), for
    each taper (even for the boxcar or hann taper). This is useful for
    multitaper analysis (DPSS), where it doesn't make sense to average
    complex results.

    .. _wikipedia: https://en.wikipedia.org/wiki/Spectral_density

    TODO
    ----
    Scipy v1.1 can generate dpss tapers. Once scipy v1.1 is available, use
    that instead of the extern folder.
    """
    if output == 'complex' and sides == 'one':
        print('complex always returns both sides')
        sides = 'two'

    axis = x.ndim - 1
    n_smp = x.shape[axis]
    if n_fft is None:
        n_fft = n_smp

    if sides == 'one':
        freqs = np_fft.rfftfreq(n_fft, 1 / s_freq)
    elif sides == 'two':
        freqs = fftpack.fftfreq(n_fft, 1 / s_freq)

    if taper is None:
        taper = 'boxcar'

    if taper == 'dpss':
        if NW is None:
            NW = halfbandwidth * n_smp / s_freq
        tapers, eig = dpss_windows(n_smp, NW, 2 * NW - 1)
        if scaling == 'chronux':
            tapers *= sqrt(s_freq)
    else:
        if taper == 'hann':
            tapers = windows.hann(n_smp, sym=False)[None, :]
        else:
            # TODO: it'd be nice to use sym=False if possible, but the
            # difference is very small
            tapers = get_window(taper, n_smp)[None, :]

        if scaling == 'energy':
            rms = sqrt(mean(tapers**2))
            tapers /= rms * sqrt(n_smp)
        elif scaling != 'chronux':
            # unclear how chronux treats other windows apart from dpss
            tapers /= norm(tapers)

    if detrend is not None:
        x = detrend_func(x, axis=axis, type=detrend)
    tapered = tapers * x[..., None, :]

    if sides == 'one':
        result = np_fft.rfft(tapered, n=n_fft)
    elif sides == 'two':
        result = fftpack.fft(tapered, n=n_fft)

    if scaling == 'chronux':
        result /= s_freq
    elif scaling == 'fieldtrip':
        result *= sqrt(2 / n_smp)

    if output == 'spectraldensity':
        result = (result.conj() * result)
    elif output == 'csd':
        result = (result[None, 0, ...].conj() * result[None, 1, ...])

    if (sides == 'one' and output in ('spectraldensity', 'csd')
            and scaling != 'chronux'):
        if n_fft % 2:
            result[..., 1:] *= 2
        else:
            # Last point is unpaired Nyquist freq point, don't double
            result[..., 1:-1] *= 2

    if scaling == 'power':
        scale = 1.0 / s_freq
    elif scaling == 'energy':
        scale = 1.0 / n_smp
    else:
        scale = 1
    if output == 'complex' and scaling in ('power', 'energy'):
        scale = sqrt(scale)
    result *= scale

    if scaling == 'fieldtrip' and output in ('spectraldensity', 'csd'):
        # fieldtrip uses only one side
        result /= 2

    if output in ('spectraldensity', 'csd'):
        if output == 'spectraldensity':
            result = result.real
        result = mean(result, axis=axis)
    elif output == 'complex':
        # dpss should be last dimension in complex, no mean
        result = swapaxes(result, axis, -1)

    return freqs, result
        yData.append(math.sin(i * 0.1) + 0.5 * math.sin(i * 0.3))
    data = np.asarray([xData, yData])
    return data

# amplitude of sine waves from fft is magnitude of complex vector
# phase of sine waves from fft is angle of complex vector
# divide amplitude by number of samples to get actual amplitude
# ignore all fft values above half of sampling frequency and increase amplitudes
# of remaining fft values accordingly, ie if half of fft values removed, double
# remaining amplitudes
# phase is given in terms of cosine terms

if __name__ == '__main__':
    data = generate_data()
    rfftData = fft.rfft(data[1])
    fftFrequencies = fft.rfftfreq(len(data[1]), 0.1)
    extremaIndeces = extremaCalculator.findAllExtrema(data[1])
    print(extremaIndeces)
    for i, val in enumerate(extremaIndeces[0]):
        plt.plot(data[0][val], data[1][val], 'rx')
    magnitudeArray = []
    for i in range(0, len(rfftData)):
        magnitudeArray.append(abs(rfftData[i]) / len(rfftData))
    magnitudeArray = np.asarray(magnitudeArray)
    # plt.plot(fftFrequencies, magnitudeArray)
    plt.plot(data[0], data[1])
def GetAttenuationRateImage(self, pathkey, ScanIndex, RefSignal, fpower,
                            resolution=0.1, fband=None, windowparams=(50, 0.1),
                            fftpad=4, solverparams=(1e-6, 1e-6, 1000, True)):
    from numpy.fft import ifft, fftshift, rfft, ifftshift
    from scipy.signal import tukey
    from scipy.ndimage import zoom
    from matplotlib.pyplot import plot, show
    from scipy.sparse.linalg import lsqr
    # from numpy.linalg import dot
    fs = self.Capture.SamplingFrequency
    Gexp = []
    W = tukey(int(2 * windowparams[0]), windowparams[1])
    NFFT = int(fftpad * 2 * windowparams[0])
    a = self.Capture.AScans[ScanIndex]
    f = np.linspace(0., fs / 2, NFFT // 2 + 1)
    N = self.Capture.NumberOfElements
    Aref = rfft(W * RefSignal, NFFT)
    for m in range(N):
        for n in range(m, N):
            ind = int(np.round(fs * self.PathParameters[pathkey]['Delays'][m][n]))
            A = rfft(W * a[m, n, ind - windowparams[0]:ind + windowparams[0]], NFFT)
            if fband is None:
                indf = GetSpectralRange(Aref, Aref)
            else:
                indf = np.where((f >= fband[0]) & (f <= fband[1]))[0]
            AAref = Aref[indf]
            A = A[indf]
            Arefmax = np.amax(abs(AAref))
            v = FitPowerLaw(f[indf], -np.log(np.abs(A / AAref)), fpower)
            Gexp.append(v[0])
    Gexp = np.array(Gexp).reshape(-1, 1)
    np.nan_to_num(Gexp, False)
    # Iexp = np.dot(self.ProjectionMatrixInverse[pathkey], Gexp)
    # Iavg = np.dot(self.ProjectionMatrixInverse[pathkey], Gavg)
    Iexp, convexp = SolveCimmino(self.ProjectionMatrix[pathkey], Gexp,
                                 solverparams[0], solverparams[1],
                                 solverparams[2], solverparams[3])
    xmin = np.amin(self.Grid['x'])
    xmax = np.amax(self.Grid['x'])
    ymin = np.amin(self.Grid['y'])
    ymax = np.amax(self.Grid['y'])
    x, y = np.meshgrid(
        np.linspace(xmin, xmax, int(np.round((xmax - xmin) / resolution))),
        np.linspace(ymin, ymax, int(np.round((ymax - ymin) / resolution))))
    Iexp = BilinearInterp(x, y, self.Grid['x'], self.Grid['y'], Iexp).reshape(x.shape)
    return Iexp, convexp, Gexp
# when the sound is near to that, play that same message
pts = {}
pts[0] = array([])
pts[0].resize(513)
lastnote = 0
bufnum = 0
while True:
    try:
        # for j in range(window//N - 1):
        #     input[:, j*N] = input[:, (j+1)*N]
        jack.process(output, input[:, -N:])
        x = rfft(input[0] * hamming(window))
        x = array([math.sqrt(c.real * c.real + c.imag * c.imag) for c in x])
        dists = [(i, linalg.norm(x - pt)) for i, pt in pts.items()]
        _, mini = findminp(dists)
        # if lastnote != mini: midiout.sendMessage(144, mini, 0)
        lastnote = mini
        if mini != 0:
            # TODO: use RMS volume of input signal as value here
            midiout.sendMessage(144, mini, 120)
        print(mini)
        # dist0 = linalg.norm(x - pt0)
def stft(
        time_signal,
        size,
        shift,
        *,
        axis=-1,
        window=signal.blackman,
        window_length,
        fading,
        pad,
        symmetric_window,
):
    """
    ToDo: Open points:
     - sym_window needs literature
     - fading: why is it better?
     - should pad have more degrees of freedom?

    Calculates the short time Fourier transformation of a multi channel
    multi speaker time signal. It is able to add additional zeros for
    fade-in and fade out and should yield an STFT signal which allows
    perfect reconstruction.

    Args:
        time_signal: Multi channel time signal with dimensions
            AA x ... x AZ x T x BA x ... x BZ.
        size: Scalar FFT-size.
        shift: Scalar FFT-shift, the step between successive frames in
            samples. Typically shift is a fraction of size.
        axis: Scalar axis of time. Default: None means the biggest dimension.
        window: Window function handle. Default is blackman window.
        fading: Pads the signal with zeros for better reconstruction.
        window_length: Sometimes one desires to use a shorter window than
            the fft size. In that case, the window is padded with zeros.
            The default is to use the fft-size as a window size.
        pad: If true zero pad the signal to match the shape, else cut
        symmetric_window: symmetric or periodic window. Assume the window
            is periodic. Since the implementation of the windows in
            scipy.signal has a curious behaviour for odd window_length,
            use window(len+1)[:-1]. This is equal to the behaviour of MATLAB.

    Returns:
        Single channel complex STFT signal with dimensions
        AA x ... x AZ x T' times size/2+1 times BA x ... x BZ.
    """
    time_signal = np.array(time_signal)

    axis = axis % time_signal.ndim

    if window_length is None:
        window_length = size

    # Pad with zeros to have enough samples for the window function to fade.
    if fading:
        pad_width = np.zeros((time_signal.ndim, 2), dtype=int)
        pad_width[axis, :] = window_length - shift
        time_signal = np.pad(time_signal, pad_width, mode='constant')

    if symmetric_window:
        window = window(window_length)
    else:
        # https://github.com/scipy/scipy/issues/4551
        window = window(window_length + 1)[:-1]

    time_signal_seg = segment_axis_v2(
        time_signal,
        window_length,
        shift=shift,
        axis=axis,
        end='pad' if pad else 'cut'
    )

    letters = string.ascii_lowercase[:time_signal_seg.ndim]
    mapping = letters + ',' + letters[axis + 1] + '->' + letters

    try:
        # ToDo: Implement this more memory efficient
        return rfft(
            np.einsum(mapping, time_signal_seg, window),
            n=size,
            axis=axis + 1
        )
    except ValueError as e:
        raise ValueError(
            f'Could not calculate the stft, something does not match.\n'
            f'mapping: {mapping}, '
            f'time_signal_seg.shape: {time_signal_seg.shape}, '
            f'window.shape: {window.shape}, '
            f'size: {size}, '
            f'axis+1: {axis+1}'
        ) from e
def check_the_extrema(sample: dict):
    # check the peaks
    # define which extremum is closer to infra-red range and which to ultra-violet
    right_extremum = None
    if 435.0 > sample['minimum'][0] > 405.0 and sample['minimum'][0] > sample['maximum'][0]:
        # 405 - is additional check for the
        right_extremum = sample['minimum']
    elif 405.0 < sample['maximum'][0] < 435.0 and sample['minimum'][0] < sample['maximum'][0]:
        right_extremum = sample['maximum']
    sample['reversed FFT'] = None
    # check if the right extremum exists
    if right_extremum is not None:
        # make the fft (real), collect only first 3 frequencies and make ifft (real)
        rft = rfft(sample['answer_range'][1])
        rft[3:] = 0
        irft = irfft(rft)
        sample['reversed FFT'] = irft
        # get the polynomial fit of the ifft curve with appropriate range
        polyfit_range = 3
        r_squar = 0
        coeffs = None
        p = None
        while r_squar < 0.999:
            coeffs = np.polyfit(sample['answer_range'][0], irft, polyfit_range)
            # r-squared
            p = np.poly1d(coeffs)
            # fit values, and mean
            yhat = p(sample['answer_range'][0])  # or [p(z) for z in x]
            ybar = np.sum(irft) / len(irft)  # or sum(y)/len(y)
            ssreg = np.sum((yhat - ybar)**2)  # or sum([(yihat - ybar)**2 for yihat in yhat])
            sstot = np.sum((irft - ybar)**2)  # or sum([(yi - ybar)**2 for yi in y])
            r_squar = ssreg / sstot
            polyfit_range += 1
        if coeffs is not None:
            coeffs = list(coeffs)
            coeffs.reverse()
            deriv_poly = [coeffs[i] * i for i in range(1, len(coeffs))]
            deriv_poly.reverse()
            roots = np.roots(deriv_poly)
            test_points = [p(root) for root in roots]
            test_points.insert(0, p(sample['answer_range'][0][0]))
            test_points.append(p(sample['answer_range'][0][-1]))
            # test roots and remove ones that do not illustrate real extrema
            confirmed_roots = {}
            for root_index in range(0, len(roots)):
                left_y = test_points[root_index]
                # root_y = test_points[root_index + 1]
                # because test points contain additional elements at the beginning and the end
                right_y = test_points[root_index + 2]
                # if left_y > root_y < right_y or left_y < root_y > right_y:
                if sample['answer_range'][0][0] <= roots[root_index] <= sample['answer_range'][0][-1]:
                    confirmed_roots[roots[root_index]] = test_points[root_index + 1]
            print(f'confirmed roots: {confirmed_roots.keys()}')
            # get distances between the extrema
            distances = {}
            start = 0
            for root in confirmed_roots:
                for i in range(start, len(confirmed_roots)):
                    key = list(confirmed_roots.keys())[i]
                    if key != root:
                        distances[f'{root}-{key}'] = abs(confirmed_roots[root] - confirmed_roots[key])
                start += 1
            a = [x / sorted(distances.values())[-1] for x in sorted(distances.values())]
            bins = {}
            bins[1] = [x for x in a if x < 0.25]
            bins[2] = [x for x in a if 0.25 < x < 0.5]
            bins[3] = [x for x in a if 0.5 < x < 0.75]
            bins[4] = [x for x in a if 0.9 < x]
            # pprint(bins)
            pprint(distances)
            if len(bins[4]) == 1:
                print('bins answer True, applying additional check')
                # find pair of extrema that have maximal distance by Y between them
                left_extremum = None
                right_extremum = None
                dist = sys.float_info.min
                xs = list(confirmed_roots.keys())
                for first_root_index in range(0, len(xs)):
                    left_root_x = xs[first_root_index]
                    left_root_y = confirmed_roots[left_root_x]
                    for second_root_index in range(first_root_index + 1, len(xs)):
                        right_root_x = xs[second_root_index]
                        right_root_y = confirmed_roots[right_root_x]
                        current_distance = abs(right_root_y - left_root_y)
                        if current_distance > dist:
                            left_extremum = left_root_x
                            right_extremum = right_root_x
                            dist = current_distance
                # as our xs are somehow sorted in reverse order, flip them back
                left_extremum, right_extremum = right_extremum, left_extremum
                print(f'extrema with maximal distance have the following X: '
                      f'{left_extremum} and {right_extremum}')
                if left_extremum is not None:
                    # cut the graph by left and right extrema
                    shortened_xs = []
                    shortened_ys = []
                    for element_index in range(0, len(sample['answer_range'][0]) - 1):
                        x = sample['answer_range'][0][element_index]
                        if left_extremum < x < right_extremum:
                            y = irft[element_index]
                            shortened_xs.append(x)
                            shortened_ys.append(y)
                    # find R^2 value for a 3rd-order polynomial approximation
                    coeffs = np.polyfit(shortened_xs, shortened_ys, 3)
                    # r-squared
                    p = np.poly1d(coeffs)
                    # fit values, and mean
                    yhat = p(shortened_xs)
                    ybar = np.sum(shortened_ys) / len(shortened_ys)
                    ssreg = np.sum((yhat - ybar)**2)
                    sstot = np.sum((shortened_ys - ybar)**2)
                    r_squar = ssreg / sstot
                    print(f'r squared for shortened range: {r_squar}')
                    if r_squar > 0.98:
                        print('shortened answer True')
                        sample['positive_answer'] = True
                    else:
                        print('shortened answer False')
            else:
                print('bins answer False')
    pprint(f"answer maximum: {sample['maximum']}")
    pprint(f"answer minimum: {sample['minimum']}")
    print(f"Answer: {sample['positive_answer']}")
    print('-' * 10)
def spectrum(length, u, v, w):
    data_path = "./"
    Figs_Path = "./"
    Fig_file_name = "Ek_Spectrum"

    # -----------------------------------------------------------------
    #  COMPUTATIONS
    # -----------------------------------------------------------------
    localtime = time.asctime(time.localtime(time.time()))
    print("Computing spectrum... ", localtime)

    N = int(round((length**(1. / 3))))
    print("N =", N)
    eps = 1e-50  # to avoid log(0)

    U = u
    V = v
    W = w

    amplsU = abs(fftn(U) / U.size)
    amplsV = abs(fftn(V) / V.size)
    amplsW = abs(fftn(W) / W.size)

    EK_U = amplsU**2
    EK_V = amplsV**2
    EK_W = amplsW**2

    EK_U = fftshift(EK_U)
    EK_V = fftshift(EK_V)
    EK_W = fftshift(EK_W)

    sign_sizex = np.shape(EK_U)[0]
    sign_sizey = np.shape(EK_U)[1]
    sign_sizez = np.shape(EK_U)[2]

    box_sidex = sign_sizex
    box_sidey = sign_sizey
    box_sidez = sign_sizez

    box_radius = int(np.ceil((np.sqrt((box_sidex)**2 + (box_sidey)**2 + (box_sidez)**2)) / 2.) + 1)

    centerx = int(box_sidex / 2)
    centery = int(box_sidey / 2)
    centerz = int(box_sidez / 2)

    print("box sidex =", box_sidex)
    print("box sidey =", box_sidey)
    print("box sidez =", box_sidez)
    print("sphere radius =", box_radius)
    print("center box x =", centerx)
    print("center box y =", centery)
    print("center box z =", centerz, "\n")

    EK_U_avsphr = np.zeros(box_radius, ) + eps  # size of the radius
    EK_V_avsphr = np.zeros(box_radius, ) + eps  # size of the radius
    EK_W_avsphr = np.zeros(box_radius, ) + eps  # size of the radius

    # Bin the energy onto spherical shells of integer wavenumber
    for i in range(box_sidex):
        for j in range(box_sidey):
            for k in range(box_sidez):
                wn = int(round(np.sqrt((i - centerx)**2 + (j - centery)**2 + (k - centerz)**2)))
                EK_U_avsphr[wn] = EK_U_avsphr[wn] + EK_U[i, j, k]
                EK_V_avsphr[wn] = EK_V_avsphr[wn] + EK_V[i, j, k]
                EK_W_avsphr[wn] = EK_W_avsphr[wn] + EK_W[i, j, k]
        print('iterating ' + str(i), flush=True)

    EK_avsphr = 0.5 * (EK_U_avsphr + EK_V_avsphr + EK_W_avsphr)

    fig2 = plt.figure()
    # plt.title("Kinetic Energy Spectrum")
    plt.xlabel(r"k")
    plt.ylabel(r"E(k)")

    realsize = len(rfft(U[:, 0, 0]))
    plt.loglog(np.arange(0, realsize), EK_avsphr[0:realsize], 'k')
    plt.loglog(np.arange(realsize, len(EK_avsphr), 1), EK_avsphr[realsize:], 'k--')
    axes = plt.gca()
    axes.set_ylim([10**-25, 5**-1])

    print("Real Kmax = ", realsize)
    print("Spherical Kmax = ", len(EK_avsphr))

    TKEofmean_discrete = 0.5 * (sum(U / U.size)**2 + sum(V / V.size)**2 + sum(W / W.size)**2)
    TKEofmean_sphere = EK_avsphr[0]

    total_TKE_discrete = sum(0.5 * (U**2 + V**2 + W**2)) / (N * 1.0)**3
    total_TKE_sphere = sum(EK_avsphr)

    print("the KE of the mean velocity discrete = ", TKEofmean_discrete)
    print("the KE of the mean velocity sphere = ", TKEofmean_sphere)
    print("the mean KE discrete = ", total_TKE_discrete)
    print("the mean KE sphere = ", total_TKE_sphere)

    localtime = time.asctime(time.localtime(time.time()))
    print("Computing spectrum... ", localtime, "- END \n")

    # -----------------------------------------------------------------
    #  OUTPUT/PLOTS
    # -----------------------------------------------------------------
    dataout = np.zeros((box_radius, 2))
    dataout[:, 0] = np.arange(0, len(dataout))
    dataout[:, 1] = EK_avsphr[0:len(dataout)]

    # savetxt(Figs_Path + Fig_file_name + '.dat', dataout)
    # fig.savefig(Figs_Path + Fig_file_name + '.pdf')
    return fig2
# assumes: import numpy as np; import numpy.fft as ff;
#          from scipy import signal; import matplotlib.pyplot as plt
plt.plot(xvals, x[:, 0] / 16, color="grey")
plt.plot(xvals, x[:, 1] / 16, color="green")
plt.plot(xvals, x[:, 2] / 16, color="red")
plt.xlabel("Time, s")
plt.ylabel("Pulse length, us")
plt.title("Zero crossing pulse length, R input = 16.4k R pullup = 10k")
plt.savefig("Zero crossing pulse length, R input = 16k4 R pullup = 10k")

plt.figure()
xvals = np.arange(0.0, N / 2 + 1, 1)
xvals = xvals / np.max(xvals) * 50
# median-filtered magnitude spectrum (in dB) of the normalized pulse lengths
plt.plot(xvals,
         signal.medfilt(
             20 * np.log10(abs(ff.rfft(x[:, 0] / np.max(x[:, 0])))), 355))
plt.xlabel("Hz")
plt.ylabel("Magnitude, db")
plt.title("Zero crossing pulse length FFT, R input = 16.4k R pullup = 10k")
plt.savefig("Zero crossing pulse length FFT, R input = 16k4 R pullup = 10k")

plt.figure()
std = np.std(diff)
sigma = std
plt.hist(diff, bins=( np.min(diff)*-1 +
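# The hand-built frequency axis above (xvals rescaled to top out at 50) can be
# obtained directly from np.fft.rfftfreq; a small sketch assuming a 100 Hz
# sample rate (the rate and the placeholder signal are illustrative):
import numpy as np

fs = 100.0                                     # assumed sample rate, Hz (Nyquist = 50 Hz)
sig = np.random.standard_normal(4096)          # placeholder signal
spec_db = 20 * np.log10(np.abs(np.fft.rfft(sig / np.max(np.abs(sig)))))
freqs = np.fft.rfftfreq(len(sig), d=1.0 / fs)  # 0 .. fs/2, same length as spec_db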
from numpy import (frombuffer, int16, empty, hanning, delete, zeros,
                   log10, fft, abs as npabs, sum as npsum)


def calculate_levels(data, chunk_size, sample_rate, frequency_limits, num_bins,
                     input_channels=2):
    """Calculate frequency response for each channel defined in frequency_limits

    :param data: decoder.frames(), audio data for fft calculations
    :type data: decoder.frames

    :param chunk_size: chunk size of audio data
    :type chunk_size: int

    :param sample_rate: audio file sample rate
    :type sample_rate: int

    :param frequency_limits: list of frequency_limits
    :type frequency_limits: list

    :param num_bins: length of gpio to process
    :type num_bins: int

    :param input_channels: number of audio input channels to process for (default=2)
    :type input_channels: int

    :return: the log10 power of each frequency bin
    :rtype: numpy.array
    """
    # create a numpy array, taking just the left channel if stereo
    data_stereo = frombuffer(data, dtype=int16)
    if input_channels == 2:
        # data has 2 bytes per channel
        data = empty(len(data) // (2 * input_channels))

        # pull out the even values, just using left channel
        data[:] = data_stereo[::2]
    elif input_channels == 1:
        data = data_stereo

    # if you take an FFT of a chunk of audio, the edges will look like
    # super high frequency cutoffs. Applying a window tapers the edges
    # of each end of the chunk down to zero.
    data = data * hanning(len(data))

    # Apply FFT - real data
    fourier = fft.rfft(data)

    # Remove last element in array to make it the same size as chunk_size
    fourier = delete(fourier, len(fourier) - 1)

    # Calculate the power spectrum
    power = npabs(fourier) ** 2

    matrix = zeros(num_bins, dtype='float64')
    for pin in range(num_bins):
        # take the log10 of the resulting sum to approximate how human ears
        # perceive sound levels

        # Get the power array index corresponding to a particular frequency.
        idx1 = int(chunk_size * frequency_limits[pin][0] / sample_rate)
        idx2 = int(chunk_size * frequency_limits[pin][1] / sample_rate)

        # if index1 is the same as index2 the value is an invalid value
        # we can fix this by incrementing index2 by 1. This is a temporary fix
        # for RuntimeWarning: invalid value encountered in double_scalars
        # generated while calculating the standard deviation. This warning
        # results in some channels not lighting up during playback.
        if idx1 == idx2:
            idx2 += 1

        npsums = npsum(power[idx1:idx2:1])

        # if the sum is 0 let's not take log10, just use 0;
        # eliminates RuntimeWarning: divide by zero encountered in log10,
        # and does not insert -inf
        if npsums == 0:
            matrix[pin] = 0
        else:
            matrix[pin] = log10(npsums)

    return matrix
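# A sketch of driving calculate_levels() with synthetic mono audio; the chunk
# size, sample rate, and frequency bands below are illustrative, not values
# from the original document:
import numpy as np

chunk_size = 2048
sample_rate = 44100
frequency_limits = [(0, 156), (156, 313), (313, 625), (625, 1250),
                    (1250, 2500), (2500, 5000), (5000, 10000), (10000, 15000)]
chunk = (np.random.standard_normal(chunk_size) * 1000).astype(np.int16).tobytes()
levels = calculate_levels(chunk, chunk_size, sample_rate, frequency_limits,
                          num_bins=len(frequency_limits), input_channels=1)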
def run(self, tf, dt, c1, c2):
    """ Run a simulation """
    from numpy.fft import rfft
    from scipy.signal import detrend
    from scipy.signal.windows import blackmanharris

    np.random.seed(62)

    n, m, k = self.n, self.m, self.k

    # Total simulation time
    simTime = int(tf / dt)

    # Returns the three synaptic connections kernels
    W12, W21, W22, delays = self.build_kernels()

    # Compute delays by dividing distances by axonal velocity
    # (delays must be integer numbers of time steps to serve as indices)
    delays12 = np.floor(delays[0] / c2).astype(int)
    delays21 = np.floor(delays[1] / c1).astype(int)
    delays22 = np.floor(delays[2] / c2).astype(int)
    maxDelay = int(
        max(delays12[0].max(), delays21[0].max(), delays22[0].max()))

    # Set the initial conditions and the history
    self.initial_conditions(simTime)

    # Initialize the cortical and striatal inputs
    Cx = 0.5
    Str = 0.4

    # Presynaptic activities
    pre12, pre21, pre22 = np.empty((m, )), np.empty((m, )), np.empty((m, ))

    # Simulation
    for i in range(maxDelay, simTime):
        # Take into account the history of rate for each neuron according
        # to its axonal delay
        for idxi, ii in enumerate(range(m)):
            mysum = 0.0
            for jj in range(k, n):
                mysum += (W12[ii, jj] *
                          self.X2[i - delays12[ii, jj], jj]) * self.dx
            pre12[idxi] = mysum

        for idxi, ii in enumerate(range(k, n)):
            mysum = 0.0
            for jj in range(0, m):
                mysum += (W21[ii, jj] *
                          self.X1[i - delays21[ii, jj], jj]) * self.dx
            pre21[idxi] = mysum

        for idxi, ii in enumerate(range(k, n)):
            mysum = 0.0
            for jj in range(k, n):
                mysum += (W22[ii, jj] *
                          self.X2[i - delays22[ii, jj], jj]) * self.dx
            pre22[idxi] = mysum

        # Forward Euler step
        self.X1[i, :m] = (
            self.X1[i - 1, :m] +
            (-self.X1[i - 1, :m] + self.S1(-pre12 + Cx)) * dt / self.tau1)
        self.X2[i, k:] = (
            self.X2[i - 1, k:] +
            (-self.X2[i - 1, k:] + self.S2(pre21 - pre22 - Str)) * dt / self.tau2)

    # Estimate the dominant oscillation frequency of the mean firing rate:
    # detrend, window, and take the argmax of the rFFT magnitude
    dx = 1.0 / float(m)
    fr = self.X1.sum(axis=1) * dx / 1.0
    signal = detrend(fr)
    windowed = signal * blackmanharris(len(signal))
    f = rfft(windowed)
    i = np.argmax(np.abs(f))
    # true_i = parabolic(np.log(np.abs(f)), i)[0]
    return i
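# The frequency estimate at the end of run() is the argmax of a windowed rFFT.
# A self-contained sketch of the same idea on a known test tone (the 10 Hz
# tone and 1 kHz sample rate are illustrative assumptions):
import numpy as np
from scipy.signal import detrend
from scipy.signal.windows import blackmanharris

fs = 1000.0
t = np.arange(0, 1.0, 1.0 / fs)
tone = np.sin(2 * np.pi * 10.0 * t) + 0.1 * np.random.standard_normal(t.size)
windowed = detrend(tone) * blackmanharris(tone.size)
peak_bin = np.argmax(np.abs(np.fft.rfft(windowed)))
peak_hz = peak_bin * fs / tone.size  # bin index -> frequency; ~10 Hz here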
import numpy as np
from numpy.fft import rfft
# assumes: scikits.audiolab is importable as `audiolab`


def get_spectrogram(filename, fft_length):
    fp = audiolab.Sndfile(filename, 'r')
    sample_rate = fp.samplerate
    total_num_samps = fp.nframes
    num_fft = (total_num_samps // fft_length) - 2

    # create temporary working array
    fft_buckets = np.zeros((num_fft, fft_length), float)
    channels = fp.channels

    # read in the data from the file
    for i in range(num_fft):
        frames = fp.read_frames(fft_length)
        if channels == 2:
            # TODO: figure out how to combine channels appropriately
            fft_buckets[i, :] = frames[:, 0] - 128.0
        elif channels == 1:
            fft_buckets[i, :] = frames - 128.0
        else:
            raise Exception("Unsupported # of channels: %d" % channels)

    # Window the data
    fft_buckets = fft_buckets * np.hamming(fft_length)

    # Transform with the FFT, return power
    freq_pwr = 10 * np.log10(1e-20 + abs(rfft(fft_buckets, fft_length)))

    n_out_pts = (fft_length // 2) + 1
    axis_hz = 0.5 * float(sample_rate) / n_out_pts * np.arange(n_out_pts)
    axis = axis_hz / 1000

    audio = []
    audionorm = []
    trans = freq_pwr.transpose()
    for freq in trans:
        audio.append(freq.sum())
        audionorm.append(freq.sum() / n_out_pts)
    # plt.plot(axis, audionorm)

    # slope of the summed spectrum via a central-difference convolution
    window = [-1, 0, 1]
    slope = np.convolve(audio, window, mode='same') / np.convolve(
        range((fft_length // 2) + 1), window, mode='same')
    slopes = []
    slopenorm = []
    for point in slope:
        slopes.append(point)
        slopenorm.append(point / n_out_pts)
    # plt.plot(axis, slopenorm)
    # plt.show()

    highfreq = 0
    for hz in axis_hz:
        if hz > highfreq:
            highfreq = hz
    freqinc = highfreq / ((fft_length // 2) + 1)
    freqcut = int(10000 / freqinc)
    slopespart = slopenorm[freqcut:]

    # Find local minima (sharp dips in the slope mark the encoder's cutoff)
    localminima = []
    for i in range(len(slopespart) - 2):
        if slopespart[i] > slopespart[i + 1] and \
                slopespart[i + 1] < slopespart[i + 2] and \
                slopespart[i + 1] < -10:
            localminima.append(slopespart[i + 1])
    if len(localminima) < 1:
        return "xxxxx"

    last = slopenorm.index(localminima[-1]) * freqinc
    if last > 20500:
        last = slopenorm.index(localminima[-2]) * freqinc
    lastround = round(last / 500) * 500
    bitfreqs = {
        20000: '320',
        19500: '256',
        19000: 'v0',
        18500: '192',
        18000: 'v2',
        16500: '128'
    }
    # print(last, lastround)
    # print("Best guess at source quality: " + bitfreqs[lastround])
    return str(int(lastround)) + " - " + str(int(last))
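# Hypothetical call of get_spectrogram(); the filename and the 1024-point FFT
# length are illustrative, and the returned string is the cutoff-based guess
# at the source bitrate:
guess = get_spectrogram('sample.wav', 1024)
print("Best guess at source quality:", guess)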
# assumes (module level): from brian2 import *; import os, math;
# import numpy as np; from scipy.special import erf;
# from numpy.fft import rfft, irfft; `par` is a module-level global
def simulate_wm(N_excitatory=2048,
                N_inhibitory=512,
                N_extern_poisson=1000,
                poisson_firing_rate=1.8 * Hz,
                sigma_weight_profile=14.4,
                Jpos_excit2excit=1.63,
                stimulus1_center_deg=180,
                stimulus2_center_deg=235,
                stimulus_width_deg=60,
                stimulus_strength=0.07 * namp,
                t_stimulus1_start=0 * ms,
                t_stimulus2_start=4000 * ms,
                t_stimulus_duration=0 * ms,
                t_delay1=3000 * ms,
                t_delay2=3000 * ms,
                t_iti_duration=300 * ms,
                sim_time=2000. * ms,
                monitored_subset_size=1024):
    """
    Args:
        N_excitatory (int): Size of the excitatory population
        N_inhibitory (int): Size of the inhibitory population
        N_extern_poisson (int): Size of the external input population (Poisson input)
        poisson_firing_rate (Quantity): Firing rate of the external population
        sigma_weight_profile (float): standard deviation of the Gaussian input profile
            in the excitatory population.
        Jpos_excit2excit (float): Strength of the recurrent input within the excitatory
            population. Jneg_excit2excit is computed from sigma_weight_profile,
            Jpos_excit2excit and the normalization condition.
        stimulus1_center_deg, stimulus2_center_deg (float): Centers of the two stimuli
            in [0, 360]
        stimulus_width_deg (float): width of each stimulus. All neurons in
            center_deg +- (stimulus_width_deg/2) receive the same input current
        stimulus_strength (Quantity): Input current to the neurons at
            center_deg +- (stimulus_width_deg/2)
        t_stimulus1_start, t_stimulus2_start (Quantity): times when the two stimuli
            are turned on
        t_stimulus_duration (Quantity): duration of each stimulus.
        t_delay1, t_delay2 (Quantity): delay periods following the stimuli
        t_iti_duration (Quantity): inter-trial interval before the second stimulus
        sim_time (Quantity): simulation time
        monitored_subset_size (int): nr of neurons for which a Spike monitor is
            registered.

    Note: the weight prefactor is computed inside the function as
        weight_scaling_factor = 2048. / N_excitatory, so that
        N_excitatory * weight_scaling_factor = 2048.

    Returns:
        results (tuple): spike_monitor_excit, synapse_monitor_excit
    """
    global par
    print(par)
    devices.device.seed(os.getpid())

    # specify the excitatory pyramidal cells:
    Cm_excit = 0.5 * nF  # membrane capacitance of excitatory neurons
    G_leak_excit = 25.0 * nS  # leak conductance
    E_leak_excit = -70.0 * mV  # reversal potential
    v_firing_threshold_excit = -50.0 * mV  # spike condition
    v_reset_excit = -60.0 * mV  # reset voltage after spike
    t_abs_refract_excit = 2.0 * ms  # absolute refractory period

    # specify the weight profile in the recurrent population
    # std-dev of the gaussian weight profile around the preferred direction
    # sigma_weight_profile = 12.0
    # Jneg_excit2excit = 0

    # specify the inhibitory interneurons:
    Cm_inhib = 0.2 * nF
    G_leak_inhib = 20.0 * nS
    E_leak_inhib = -70.0 * mV
    v_firing_threshold_inhib = -50.0 * mV
    v_reset_inhib = -60.0 * mV
    t_abs_refract_inhib = 1.0 * ms

    # specify the AMPA synapses
    E_AMPA = 0.0 * mV
    tau_AMPA = 2.0 * ms

    # specify the GABA synapses
    E_GABA = -70.0 * mV
    tau_GABA = 10.0 * ms

    # specify the NMDA synapses
    E_NMDA = 0.0 * mV
    tau_NMDA_s = 100.0 * ms
    tau_NMDA_x = 2.0 * ms
    alpha_NMDA = 0.5 * kHz

    weight_scaling_factor = 2048. / N_excitatory
    # projections from the external population
    G_extern2inhib = 2.38 * nS
    G_extern2excit = 3.1 * nS

    # projections from the inhibitory populations
    G_inhib2inhib = weight_scaling_factor * 1.024 * nS
    G_inhib2excit = weight_scaling_factor * 1.336 * nS

    # projections from the excitatory population
    G_excit2excit = weight_scaling_factor * 0.28 * nS  # nmda+ampa
    G_excit2inhib = weight_scaling_factor * 0.212 * nS  # nmda+ampa

    # recurrent AMPA
    G_excit2excitA = weight_scaling_factor * 1. * 0.251 * nS  # ampa
    GEEA = G_excit2excitA / G_extern2excit
    G_excit2inhibA = weight_scaling_factor * 0.192 * nS  # ampa
    GEIA = G_excit2inhibA / G_extern2inhib

    # STDP
    taupre = 20 * ms
    taupost = 20 * ms
    wmax = 2.  # 1.1
    Apre = par  # 0.00035  # 0.00025  # set to zero to deactivate STDP  # 0.02
    Apost = Apre  # -Apre * taupre / taupost * 1.03  # 1.4  # negative for LTD, positive for LTP
    stp_decay = 0.04  # 25  # 0.025
    # Apost = Apre * taupre / taupost  # negative for LTD, positive for LTP

    # compute the stimulus index
    stim1_center_idx = int(round(N_excitatory / 360. * stimulus1_center_deg))
    stim1_width_idx = int(round(N_excitatory / 360. * stimulus_width_deg / 2))
    stim1_target_idx = [
        idx % N_excitatory
        for idx in range(stim1_center_idx - stim1_width_idx,
                         stim1_center_idx + stim1_width_idx + 1)
    ]
    stim2_center_idx = int(round(N_excitatory / 360. * stimulus2_center_deg))
    stim2_width_idx = int(round(N_excitatory / 360. * stimulus_width_deg / 2))
    stim2_target_idx = [
        idx % N_excitatory
        for idx in range(stim2_center_idx - stim2_width_idx,
                         stim2_center_idx + stim2_width_idx + 1)
    ]

    # precompute the weight profile for the recurrent population
    tmp = math.sqrt(2. * math.pi) * sigma_weight_profile * erf(
        180. / math.sqrt(2.) / sigma_weight_profile) / 360.
    Jneg_excit2excit = (1. - Jpos_excit2excit * tmp) / (1. - tmp)
    presyn_weight_kernel = [
        (Jneg_excit2excit + (Jpos_excit2excit - Jneg_excit2excit) *
         math.exp(-.5 * (360. * min(nj, N_excitatory - nj) / N_excitatory)**2 /
                  sigma_weight_profile**2)) for nj in range(N_excitatory)
    ]
    fft_presyn_weight_kernel = rfft(presyn_weight_kernel)

    # define the inhibitory population
    a = 0.062 / mV
    inhib_lif_dynamics = """
        s_NMDA_total : 1  # the post synaptic sum of s. compare with s_NMDA_presyn
        dv/dt = (
            - G_leak_inhib * (v-E_leak_inhib)
            - G_extern2inhib * s_AMPA * (v-E_AMPA)
            - G_inhib2inhib * s_GABA * (v-E_GABA)
            - G_excit2inhib * s_NMDA_total * (v-E_NMDA)/(1.0+1.0*exp(-a*v)/3.57)
            )/Cm_inhib : volt (unless refractory)
        ds_AMPA/dt = -s_AMPA/tau_AMPA : 1
        ds_GABA/dt = -s_GABA/tau_GABA : 1
    """

    inhib_pop = NeuronGroup(N_inhibitory,
                            model=inhib_lif_dynamics,
                            threshold="v>v_firing_threshold_inhib",
                            reset="v=v_reset_inhib",
                            refractory=t_abs_refract_inhib,
                            method="rk2")
    # initialize with random voltages:
    inhib_pop.v = np.random.uniform(v_reset_inhib / mV,
                                    high=v_firing_threshold_inhib / mV,
                                    size=N_inhibitory) * mV

    # set the connections: inhib2inhib
    syn_inhib2inhib = Synapses(inhib_pop,
                               target=inhib_pop,
                               on_pre="s_GABA += 1.0",
                               delay=0.0 * ms)
    syn_inhib2inhib.connect(condition="i!=j", p=1.0)
    # syn_inhib2inhib.connect(p=1.0)

    # set the connections: extern2inhib
    input_ext2inhib = PoissonInput(target=inhib_pop,
                                   target_var="s_AMPA",
                                   N=N_extern_poisson,
                                   rate=poisson_firing_rate,
                                   weight=1.0)

    # specify the excitatory population:
    excit_lif_dynamics = """
        I_stim : amp
        s_NMDA_total : 1  # the post synaptic sum of s. compare with s_NMDA_presyn
        dv/dt = (
            - G_leak_excit * (v-E_leak_excit)
            - G_extern2excit * s_AMPA * (v-E_AMPA)
            - G_inhib2excit * s_GABA * (v-E_GABA)
            - G_excit2excit * s_NMDA_total * (v-E_NMDA)/(1.0+1.0*exp(-a*v)/3.57)
            + I_stim
            )/Cm_excit : volt (unless refractory)
        ds_AMPA/dt = -s_AMPA/tau_AMPA : 1
        ds_GABA/dt = -s_GABA/tau_GABA : 1
        ds_NMDA/dt = -s_NMDA/tau_NMDA_s + alpha_NMDA * x * (1-s_NMDA) : 1
        dx/dt = -x/tau_NMDA_x : 1
    """

    excit_pop = NeuronGroup(N_excitatory,
                            model=excit_lif_dynamics,
                            threshold="v>v_firing_threshold_excit",
                            reset="v=v_reset_excit",
                            refractory=t_abs_refract_excit,
                            method="rk2")
    # initialize with random voltages:
    excit_pop.v = np.random.uniform(v_reset_excit / mV,
                                    high=v_firing_threshold_excit / mV,
                                    size=N_excitatory) * mV
    excit_pop.I_stim = 0. * namp

    # set the connections: extern2excit
    input_ext2excit = PoissonInput(target=excit_pop,
                                   target_var="s_AMPA",
                                   N=N_extern_poisson,
                                   rate=poisson_firing_rate,
                                   weight=1.0)

    # set the connections: inhibitory to excitatory
    syn_inhib2excit = Synapses(inhib_pop, excit_pop, on_pre="s_GABA += 1.0")
    syn_inhib2excit.connect(p=1.0)

    # set the connections: excitatory to inhibitory NMDA connections
    syn_excit2inhib = Synapses(
        excit_pop,
        inhib_pop,
        model="s_NMDA_total_post = s_NMDA_pre : 1 (summed)",
        method="rk2")
    syn_excit2inhib.connect(p=1.0)

    # # set the connections: UNSTRUCTURED excitatory to excitatory
    # syn_excit2excit = Synapses(excit_pop, excit_pop,
    #     model="s_NMDA_total_post = s_NMDA_pre : 1 (summed)", method="rk2")
    # syn_excit2excit.connect(condition="i!=j", p=1.)

    # set the STRUCTURED recurrent AMPA input
    # equations for weights, trace decay
    synapse_eqs = '''
        w : 1
        stp : 1
        dapre/dt = -apre / taupre : 1 (event-driven)
        dapost/dt = -apost / taupost : 1 (event-driven)
    '''
    # equations for presynaptic spike
    eqs_pre = '''
        s_AMPA_post += w*stp
        x_pre += (1.0/N_excitatory)*stp
        apre += Apre
        stp = clip(stp + apost - stp_decay * (stp - 1.), 0, wmax)
    '''
    # equations for postsynaptic spike
    eqs_post = '''
        apost += Apost
        stp = clip(stp + apre, 0, wmax)
    '''
    syn_excit2excit = Synapses(excit_pop,
                               excit_pop,
                               synapse_eqs,
                               on_pre=eqs_pre,
                               on_post=eqs_post)
    syn_excit2excit.connect(condition='i!=j', p=1.0)
    syn_excit2excit.stp = 1.0
    syn_excit2excit.w['abs(i-j)<N_excitatory/2'] = (
        'GEEA * (Jneg_excit2excit + (Jpos_excit2excit - Jneg_excit2excit)'
        ' * exp(-.5 * (360. * abs(i-j) / N_excitatory) ** 2'
        ' / sigma_weight_profile ** 2))')
    syn_excit2excit.w['abs(i-j)>=N_excitatory/2'] = (
        'GEEA * (Jneg_excit2excit + (Jpos_excit2excit - Jneg_excit2excit)'
        ' * exp(-.5 * (360. * (N_excitatory - abs(i-j)) / N_excitatory) ** 2'
        ' / sigma_weight_profile ** 2))')

    syn_excit2inhibA = Synapses(excit_pop,
                                inhib_pop,
                                model="w : 1",
                                on_pre="s_AMPA_post += w")
    syn_excit2inhibA.connect(p=1.0)
    syn_excit2inhibA.w = GEIA

    # set the STRUCTURED recurrent NMDA input. use a network_operation
    @network_operation()
    def update_nmda_sum():
        fft_s_NMDA = rfft(excit_pop.s_NMDA)
        fft_s_NMDA_total = np.multiply(fft_presyn_weight_kernel, fft_s_NMDA)
        s_NMDA_tot = irfft(fft_s_NMDA_total, N_excitatory)
        excit_pop.s_NMDA_total_ = s_NMDA_tot
        # excit_pop.s_NMDA_total = s_NMDA_tot
        # inhib_pop.s_NMDA_total = fft_s_NMDA[0]

    @network_operation(dt=100 * ms)
    def time_counter(t):
        print(t)

    @network_operation(dt=1 * ms)
    def stimulate_network(t):
        if t >= t_stimulus1_start and t < t_stimulus1_start + t_stimulus_duration:
            excit_pop.I_stim[stim1_target_idx] = stimulus_strength
        elif t >= t_stimulus1_start + t_stimulus_duration and t < t_stimulus1_start + t_stimulus_duration + t_delay1:
            excit_pop.I_stim = 0. * namp
        elif t >= t_stimulus1_start + t_stimulus_duration + t_delay1 and t < t_stimulus1_start + t_stimulus_duration + t_delay1 + t_stimulus_duration:
            excit_pop.I_stim = -1. * stimulus_strength
        elif t >= t_stimulus2_start - t_iti_duration and t < t_stimulus2_start:
            excit_pop.I_stim = 0. * namp
        elif t >= t_stimulus2_start and t < t_stimulus2_start + t_stimulus_duration:
            excit_pop.I_stim[stim2_target_idx] = stimulus_strength
        else:
            # syn_excit2excit.sgn = -1.0  # neuromodulation change
            excit_pop.I_stim = 0. * namp

    def get_monitors(pop, nr_monitored, N):
        nr_monitored = min(nr_monitored, N)
        idx_monitored_neurons = [
            int(math.ceil(k))
            for k in np.linspace(0, N - 1, nr_monitored + 2)
        ][1:-1]  # sample(range(N), nr_monitored)
        # rate_monitor = PopulationRateMonitor(pop)
        spike_monitor = SpikeMonitor(pop, record=idx_monitored_neurons)
        # voltage_monitor = StateMonitor(pop, "v", record=idx_monitored_neurons)
        synapse_monitor = StateMonitor(
            syn_excit2excit,
            "stp",
            record=syn_excit2excit[stim1_center_idx,
                                   stim1_center_idx - 10:stim1_center_idx + 10],
            dt=1 * ms)
        # return rate_monitor, spike_monitor, voltage_monitor, idx_monitored_neurons, synapse_monitor
        return spike_monitor, synapse_monitor

    # collect data of a subset of neurons:
    spike_monitor_excit, synapse_monitor_excit = get_monitors(
        excit_pop, monitored_subset_size, N_excitatory)

    run(sim_time)

    return spike_monitor_excit, synapse_monitor_excit
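# update_nmda_sum() above relies on the convolution theorem: multiplying the
# rFFT of the ring-shaped weight kernel by the rFFT of s_NMDA and inverting
# yields the circular convolution in O(N log N) instead of O(N^2). A
# standalone sketch of that identity (sizes and arrays are illustrative):
import numpy as np
from numpy.fft import rfft, irfft

N = 16
kernel = np.random.rand(N)
activity = np.random.rand(N)
fft_conv = irfft(rfft(kernel) * rfft(activity), N)
direct = np.array([np.sum(kernel[(j - np.arange(N)) % N] * activity)
                   for j in range(N)])
assert np.allclose(fft_conv, direct)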
"""
Fourier filtering and smoothing
"""
import numpy as np
from numpy.fft import rfft, irfft
import matplotlib.pyplot as plt

''' Part A +++++++++++++++++++++++++++++++++++++++++++++'''
# read in the Dow data and plot it
dow = np.loadtxt('dow.txt')
plt.plot(dow)
plt.title("Part A")
plt.show()

''' Part B, C, D +++++++++++++++++++++++++++++++++++++++'''
# calculate the Fourier coefficients
coefficients = rfft(dow)

# keep the first 10% of the coefficients and zero out the remaining 90%
N = len(coefficients)
coefficients[-N * 9 // 10:] = 0

# calculate the inverse Fourier transform
dow_new = irfft(coefficients)

# plot the original and smoothed series on the same axes
plt.plot(dow)
plt.plot(dow_new)
plt.title("Part B, C, D")
plt.show()

''' Part E +++++++++++++++++++++++++++++++++++++++++++++'''
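# A small helper generalizing the Part B-D step to an arbitrary keep fraction
# (the helper name and the 2% default are illustrative, not from the text):
import numpy as np
from numpy.fft import rfft, irfft

def fourier_smooth(series, keep_fraction=0.02):
    """Zero all but the first keep_fraction of the rFFT coefficients."""
    coeffs = rfft(series)
    n_keep = max(1, int(len(coeffs) * keep_fraction))
    coeffs[n_keep:] = 0
    return irfft(coeffs, len(series))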
# assumes (module level): import brian2 as b2; import numpy; import math;
# from collections import deque; from scipy.special import erf;
# from numpy.fft import rfft, irfft; and the Brian2 classes (NeuronGroup,
# Synapses, PoissonInput, network_operation, monitors) imported by name
def simulate_wm(N_excitatory=1024,
                N_inhibitory=256,
                N_extern_poisson=1000,
                poisson_firing_rate=1.4 * b2.Hz,
                weight_scaling_factor=2.,
                sigma_weight_profile=20.,
                Jpos_excit2excit=1.6,
                stimulus_center_deg=180,
                stimulus_width_deg=40,
                stimulus_strength=0.07 * b2.namp,
                t_stimulus_start=0 * b2.ms,
                t_stimulus_duration=0 * b2.ms,
                distractor_center_deg=90,
                distractor_width_deg=40,
                distractor_strength=0.0 * b2.namp,
                t_distractor_start=0 * b2.ms,
                t_distractor_duration=0 * b2.ms,
                G_inhib2inhib=.35 * 1.024 * b2.nS,
                G_inhib2excit=.35 * 1.336 * b2.nS,
                G_excit2excit=.35 * 0.381 * b2.nS,
                G_excit2inhib=.35 * 1.2 * 0.292 * b2.nS,
                monitored_subset_size=1024,
                sim_time=800. * b2.ms):
    """
    Args:
        N_excitatory (int): Size of the excitatory population
        N_inhibitory (int): Size of the inhibitory population
        weight_scaling_factor (float): weight prefactor. When increasing the size of
            the populations, the synaptic weights have to be decreased. Using the
            default values, we have N_excitatory*weight_scaling_factor = 2048 and
            N_inhibitory*weight_scaling_factor = 512
        N_extern_poisson (int): Size of the external input population (Poisson input)
        poisson_firing_rate (Quantity): Firing rate of the external population
        sigma_weight_profile (float): standard deviation of the Gaussian input profile
            in the excitatory population.
        Jpos_excit2excit (float): Strength of the recurrent input within the excitatory
            population. Jneg_excit2excit is computed from sigma_weight_profile,
            Jpos_excit2excit and the normalization condition.
        stimulus_center_deg (float): Center of the stimulus in [0, 360]
        stimulus_width_deg (float): width of the stimulus. All neurons in
            stimulus_center_deg +/- (stimulus_width_deg/2) receive the same input current
        stimulus_strength (Quantity): Input current to the neurons at
            stimulus_center_deg +/- (stimulus_width_deg/2)
        t_stimulus_start (Quantity): time when the input stimulus is turned on
        t_stimulus_duration (Quantity): duration of the stimulus.
        distractor_center_deg (float): Center of the distractor in [0, 360]
        distractor_width_deg (float): width of the distractor. All neurons in
            distractor_center_deg +/- (distractor_width_deg/2) receive the same input
            current
        distractor_strength (Quantity): Input current to the neurons at
            distractor_center_deg +/- (distractor_width_deg/2)
        t_distractor_start (Quantity): time when the distractor is turned on
        t_distractor_duration (Quantity): duration of the distractor.
        G_inhib2inhib (Quantity): projections from inhibitory to inhibitory population
            (later rescaled by weight_scaling_factor)
        G_inhib2excit (Quantity): projections from inhibitory to excitatory population
            (later rescaled by weight_scaling_factor)
        G_excit2excit (Quantity): projections from excitatory to excitatory population
            (later rescaled by weight_scaling_factor)
        G_excit2inhib (Quantity): projections from excitatory to inhibitory population
            (later rescaled by weight_scaling_factor)
        monitored_subset_size (int): nr of neurons for which a Spike- and Voltage
            monitor is registered.
        sim_time (Quantity): simulation time

    Returns:
        results (tuple):
            rate_monitor_excit (Brian2 PopulationRateMonitor for the excitatory
            population), spike_monitor_excit, voltage_monitor_excit,
            idx_monitored_neurons_excit, rate_monitor_inhib, spike_monitor_inhib,
            voltage_monitor_inhib, idx_monitored_neurons_inhib, weight_profile_45
            (the weight profile for the neuron with preferred direction = 45deg).
""" # specify the excitatory pyramidal cells: Cm_excit = 0.5 * b2.nF # membrane capacitance of excitatory neurons G_leak_excit = 25.0 * b2.nS # leak conductance E_leak_excit = -70.0 * b2.mV # reversal potential v_firing_threshold_excit = -50.0 * b2.mV # spike condition v_reset_excit = -60.0 * b2.mV # reset voltage after spike t_abs_refract_excit = 2.0 * b2.ms # absolute refractory period # specify the weight profile in the recurrent population # std-dev of the gaussian weight profile around the prefered direction # sigma_weight_profile = 12.0 # std-dev of the gaussian weight profile around the prefered direction # # Jneg_excit2excit = 0 # specify the inhibitory interneurons: Cm_inhib = 0.2 * b2.nF G_leak_inhib = 20.0 * b2.nS E_leak_inhib = -70.0 * b2.mV v_firing_threshold_inhib = -50.0 * b2.mV v_reset_inhib = -60.0 * b2.mV t_abs_refract_inhib = 1.0 * b2.ms # specify the AMPA synapses E_AMPA = 0.0 * b2.mV tau_AMPA = .9 * 2.0 * b2.ms # specify the GABA synapses E_GABA = -70.0 * b2.mV tau_GABA = 10.0 * b2.ms # specify the NMDA synapses E_NMDA = 0.0 * b2.mV tau_NMDA_s = .65 * 100.0 * b2.ms # orig: 100 tau_NMDA_x = .94 * 2.0 * b2.ms alpha_NMDA = 0.5 * b2.kHz # projections from the external population G_extern2inhib = 2.38 * b2.nS G_extern2excit = 3.1 * b2.nS # projectsions from the inhibitory populations G_inhib2inhib *= weight_scaling_factor G_inhib2excit *= weight_scaling_factor # projections from the excitatory population G_excit2excit *= weight_scaling_factor G_excit2inhib *= weight_scaling_factor # todo: verify this scaling t_stimulus_end = t_stimulus_start + t_stimulus_duration t_distractor_end = t_distractor_start + t_distractor_duration # compute the simulus index stim_center_idx = int(round(N_excitatory / 360. * stimulus_center_deg)) stim_width_idx = int(round(N_excitatory / 360. * stimulus_width_deg / 2)) stim_target_idx = [ idx % N_excitatory for idx in range(stim_center_idx - stim_width_idx, stim_center_idx + stim_width_idx + 1) ] # compute the distractor index distr_center_idx = int(round(N_excitatory / 360. * distractor_center_deg)) distr_width_idx = int(round(N_excitatory / 360. * distractor_width_deg / 2)) distr_target_idx = [ idx % N_excitatory for idx in range(distr_center_idx - distr_width_idx, distr_center_idx + distr_width_idx + 1) ] # precompute the weight profile for the recurrent population tmp = math.sqrt(2. * math.pi) * sigma_weight_profile * erf( 180. / math.sqrt(2.) / sigma_weight_profile) / 360. Jneg_excit2excit = (1. - Jpos_excit2excit * tmp) / (1. - tmp) presyn_weight_kernel = \ [(Jneg_excit2excit + (Jpos_excit2excit - Jneg_excit2excit) * math.exp(-.5 * (360. * min(j, N_excitatory - j) / N_excitatory) ** 2 / sigma_weight_profile ** 2)) for j in range(N_excitatory)] # validate the normalization condition: (360./N_excitatory)*sum(presyn_weight_kernel)/360. fft_presyn_weight_kernel = rfft(presyn_weight_kernel) weight_profile_45 = deque(presyn_weight_kernel) rot_dist = int(round(len(weight_profile_45) / 8)) weight_profile_45.rotate(rot_dist) # define the inhibitory population inhib_lif_dynamics = """ s_NMDA_total : 1 # the post synaptic sum of s. 
        dv/dt = (
            - G_leak_inhib * (v-E_leak_inhib)
            - G_extern2inhib * s_AMPA * (v-E_AMPA)
            - G_inhib2inhib * s_GABA * (v-E_GABA)
            - G_excit2inhib * s_NMDA_total * (v-E_NMDA)/(1.0+1.0*exp(-0.062*v/volt)/3.57)
            )/Cm_inhib : volt (unless refractory)
        ds_AMPA/dt = -s_AMPA/tau_AMPA : 1
        ds_GABA/dt = -s_GABA/tau_GABA : 1
    """

    inhib_pop = NeuronGroup(N_inhibitory,
                            model=inhib_lif_dynamics,
                            threshold="v>v_firing_threshold_inhib",
                            reset="v=v_reset_inhib",
                            refractory=t_abs_refract_inhib,
                            method="rk2")
    # initialize with random voltages:
    inhib_pop.v = numpy.random.uniform(v_reset_inhib / b2.mV,
                                       high=v_firing_threshold_inhib / b2.mV,
                                       size=N_inhibitory) * b2.mV

    # set the connections: inhib2inhib
    syn_inhib2inhib = Synapses(inhib_pop,
                               target=inhib_pop,
                               on_pre="s_GABA += 1.0",
                               delay=0.0 * b2.ms)
    syn_inhib2inhib.connect(condition="i!=j", p=1.0)

    # set the connections: extern2inhib
    input_ext2inhib = PoissonInput(target=inhib_pop,
                                   target_var="s_AMPA",
                                   N=N_extern_poisson,
                                   rate=poisson_firing_rate,
                                   weight=1.0)

    # specify the excitatory population:
    excit_lif_dynamics = """
        I_stim : amp
        s_NMDA_total : 1  # the post synaptic sum of s. compare with s_NMDA_presyn
        dv/dt = (
            - G_leak_excit * (v-E_leak_excit)
            - G_extern2excit * s_AMPA * (v-E_AMPA)
            - G_inhib2excit * s_GABA * (v-E_GABA)
            - G_excit2excit * s_NMDA_total * (v-E_NMDA)/(1.0+1.0*exp(-0.062*v/volt)/3.57)
            + I_stim
            )/Cm_excit : volt (unless refractory)
        ds_AMPA/dt = -s_AMPA/tau_AMPA : 1
        ds_GABA/dt = -s_GABA/tau_GABA : 1
        ds_NMDA/dt = -s_NMDA/tau_NMDA_s + alpha_NMDA * x * (1-s_NMDA) : 1
        dx/dt = -x/tau_NMDA_x : 1
    """

    excit_pop = NeuronGroup(N_excitatory,
                            model=excit_lif_dynamics,
                            threshold="v>v_firing_threshold_excit",
                            reset="v=v_reset_excit; x+=1.0",
                            refractory=t_abs_refract_excit,
                            method="rk2")
    # initialize with random voltages:
    excit_pop.v = numpy.random.uniform(v_reset_excit / b2.mV,
                                       high=v_firing_threshold_excit / b2.mV,
                                       size=N_excitatory) * b2.mV
    excit_pop.I_stim = 0. * b2.namp

    # set the connections: extern2excit
    input_ext2excit = PoissonInput(target=excit_pop,
                                   target_var="s_AMPA",
                                   N=N_extern_poisson,
                                   rate=poisson_firing_rate,
                                   weight=1.0)

    # set the connections: inhibitory to excitatory
    syn_inhib2excit = Synapses(inhib_pop, target=excit_pop,
                               on_pre="s_GABA += 1.0")
    syn_inhib2excit.connect(p=1.0)

    # set the connections: excitatory to inhibitory NMDA connections
    syn_excit2inhib = Synapses(
        excit_pop,
        inhib_pop,
        model="s_NMDA_total_post = s_NMDA_pre : 1 (summed)",
        method="rk2")
    syn_excit2inhib.connect(p=1.0)

    # # set the connections: UNSTRUCTURED excitatory to excitatory
    # syn_excit2excit = Synapses(excit_pop, excit_pop,
    #     model="s_NMDA_total_post = s_NMDA_pre : 1 (summed)", method="rk2")
    # syn_excit2excit.connect(condition="i!=j", p=1.)

    # set the STRUCTURED recurrent input. use a network_operation
    @network_operation()
    def update_nmda_sum():
        fft_s_NMDA = rfft(excit_pop.s_NMDA)
        fft_s_NMDA_total = numpy.multiply(fft_presyn_weight_kernel, fft_s_NMDA)
        s_NMDA_tot = irfft(fft_s_NMDA_total)
        excit_pop.s_NMDA_total_ = s_NMDA_tot

    @network_operation(dt=1 * b2.ms)
    def stimulate_network(t):
        if t >= t_stimulus_start and t < t_stimulus_end:
            # excit_pop[stim_start_i - 15:stim_start_i + 15].I_stim = 0.25 * b2.namp
            # Todo: review indexing
            # print("stim on")
            excit_pop.I_stim[stim_target_idx] = stimulus_strength
        else:
            # print("stim off")
            excit_pop.I_stim = 0. * b2.namp
        # add distractor
        if t >= t_distractor_start and t < t_distractor_end:
            excit_pop.I_stim[distr_target_idx] = distractor_strength

    def get_monitors(pop, nr_monitored, N):
        nr_monitored = min(nr_monitored, N)
        idx_monitored_neurons = [
            int(math.ceil(k))
            for k in numpy.linspace(0, N - 1, nr_monitored + 2)
        ][1:-1]  # sample(range(N), nr_monitored)
        rate_monitor = PopulationRateMonitor(pop)
        # record=some_list is not supported? :-(
        spike_monitor = SpikeMonitor(pop, record=idx_monitored_neurons)
        voltage_monitor = StateMonitor(pop, "v", record=idx_monitored_neurons)
        return rate_monitor, spike_monitor, voltage_monitor, idx_monitored_neurons

    # collect data of a subset of neurons:
    rate_monitor_inhib, spike_monitor_inhib, voltage_monitor_inhib, idx_monitored_neurons_inhib = \
        get_monitors(inhib_pop, monitored_subset_size, N_inhibitory)
    rate_monitor_excit, spike_monitor_excit, voltage_monitor_excit, idx_monitored_neurons_excit = \
        get_monitors(excit_pop, monitored_subset_size, N_excitatory)

    b2.run(sim_time)

    return \
        rate_monitor_excit, spike_monitor_excit, voltage_monitor_excit, idx_monitored_neurons_excit, \
        rate_monitor_inhib, spike_monitor_inhib, voltage_monitor_inhib, idx_monitored_neurons_inhib, \
        weight_profile_45
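# A hypothetical call of the simulate_wm() variant above, turning on a 250 ms
# stimulus; all parameter values here are illustrative:
import brian2 as b2

results = simulate_wm(stimulus_center_deg=120,
                      t_stimulus_start=100 * b2.ms,
                      t_stimulus_duration=250 * b2.ms,
                      sim_time=1000. * b2.ms)
rate_monitor_excit, spike_monitor_excit = results[0], results[1]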