def process(fname):
    Fs, x = wavfile.read(fname)
    a = fname.split(".wav")
    b = a[0].split("cw")
    sys.stdout.write(b[1])
    sys.stdout.write(",")
    # find frequency peaks of high-volume CW signals
    if fft_scan:
        f, s = periodogram(x, Fs, 'blackman', 4096, 'linear', False, scaling='spectrum')
        # download peakdetect from https://gist.github.com/endolith/250860
        from peakdetect import peakdet
        threshold = max(s) * 0.4  # only peaks within 0.4 ... 1.0 of the maximum are included
        maxtab, mintab = peakdet(abs(s[0:len(s) // 2 - 1]), threshold, f[0:len(f) // 2 - 1])
        if plotter:
            plt.plot(f[0:len(f) // 2 - 1], abs(s[0:len(s) // 2 - 1]), 'g-')
            print(maxtab)
            from matplotlib.pyplot import plot, scatter, show
            scatter(maxtab[:, 0], maxtab[:, 1], color='blue')
            plt.show()
    # process all CW stations louder than the threshold
    if fft_scan:
        for freq in maxtab[:, 0]:
            print("\nfreq:%5.2f" % freq)
            demodulate(x, Fs, freq)
    else:
        demodulate(x, Fs, MORSE_FREQUENCY)
def harmonicradar_fmcw(t, tm, fs, f0, f1, range_m):
    bm = f1 - f0
    ttxt = 'FMCW: {} -> {} Hz'.format(f0, f1)
    # x = chirp(t, f0, t[-1], f1, 'linear')  # radar transmit waveform
    # %% target harmonic radar tag
    # t_targ = 2 * asarray(range_m)/c  # round-trip time delay due to target distance
    # phase_targ = 2*pi*linspace(f0, f1, x.size)*t_targ
    # y = chirp(t, f0, t[-1], f1, 'linear', phi=degrees(phase_targ))
    y, x = chirp(bm, tm, t, range_m, Atarg=1, nlfm=0.)
    # y[y<0] = 0.  # tag transmit waveform
    fax, Pxx = periodogram(x, fs)
    fax, Pyy = periodogram(y, fs, detrend=False)
    plots(fax, None, Pyy, fc, ttxt)
    # %% radar received waveform (homodyne)
    z = y * x.conjugate()  # mix with tag transmit waveform
    # %% antialias filter & resample
    b = firwin(numtaps=100, cutoff=fs / 4., nyq=fs / 2.)  # LPF
    zf = lfilter(b, 1., z)
    zr, tz = resample(zf, int(zf.size * scfs / fs), t)
    faz, Pzz = periodogram(zr, scfs, detrend=False)
    plotif(tz, zr, faz, Pzz)
    sounds(zr)
def test_real_spectrum(self):
    x = np.zeros(16)
    x[0] = 1
    f, p = periodogram(x, scaling='spectrum')
    g, q = periodogram(x, scaling='density')
    assert_allclose(f, np.linspace(0, 0.5, 9))
    assert_allclose(p, q/16.0)
def test_window_external(self):
    x = np.zeros(16)
    x[0] = 1
    f, p = periodogram(x, 10, 'hann')
    win = signal.get_window('hann', 16)
    fe, pe = periodogram(x, 10, win)
    assert_array_almost_equal_nulp(p, pe)
    assert_array_almost_equal_nulp(f, fe)
def test_padded_fft(self):
    x = np.zeros(16)
    x[0] = 1
    f, p = periodogram(x)
    fp, pp = periodogram(x, nfft=32)
    assert_allclose(f, fp[::2])
    assert_allclose(p, pp[::2])
    assert_array_equal(pp.shape, (17,))
def test_empty_input(self):
    f, p = periodogram([])
    assert_array_equal(f.shape, (0,))
    assert_array_equal(p.shape, (0,))
    for shape in [(0,), (3, 0), (0, 5, 2)]:
        f, p = periodogram(np.empty(shape))
        assert_array_equal(f.shape, shape)
        assert_array_equal(p.shape, shape)
def test_nd_axis_0(self):
    x = np.zeros(20, dtype=np.float64)
    x = x.reshape((10, 2, 1))
    x[0, :, :] = 1.0
    f, p = periodogram(x, axis=0)
    assert_array_equal(p.shape, (6, 2, 1))
    assert_array_almost_equal_nulp(p[:, 0, 0], p[:, 1, 0], 60)
    f0, p0 = periodogram(x[:, 0, 0])
    assert_array_almost_equal_nulp(p0, p[:, 1, 0])
def test_nd_axis_m1(self):
    x = np.zeros(20, dtype=np.float64)
    x = x.reshape((2, 1, 10))
    x[:, :, 0] = 1.0
    f, p = periodogram(x)
    assert_array_equal(p.shape, (2, 1, 6))
    assert_array_almost_equal_nulp(p[0, 0, :], p[1, 0, :], 60)
    f0, p0 = periodogram(x[0, 0, :])
    assert_array_almost_equal_nulp(p0[np.newaxis, :], p[1, :], 60)
def get_gain(output_sig, input_sig):
    """Calculate the gain of the output signal relative to the input signal."""
    f1, Pxx1 = signal.periodogram(input_sig[:3000], 48000)
    f2, Pxx2 = signal.periodogram(output_sig, 48000)
    input_val = Pxx1[0]
    output_val = Pxx2[0]
    gain = (input_val / output_val) * 100
    return gain
def test_window_external(self):
    x = np.zeros(16)
    x[0] = 1
    f, p = periodogram(x, 10, 'hann')
    win = signal.get_window('hann', 16)
    fe, pe = periodogram(x, 10, win)
    assert_array_almost_equal_nulp(p, pe)
    assert_array_almost_equal_nulp(f, fe)
    win_err = signal.get_window('hann', 32)
    assert_raises(ValueError, periodogram, x, 10, win_err)  # win longer than signal
def test_words():
    idx = idx[:10]
    word_idx, word_map = word_load()
    word_graph = stack_graph(word_idx)
    flipped_map = flip(word_map)
    word_path = traverse_stack_graph(word_graph, word_idx[1])
    print(" ".join(path_to_words(word_path[1000:5000], flipped_map)))
    plt.plot(np.array(word_idx[:500]))
    plt.plot(np.array(word_path[:500]))
    f, pxx_den = sci_sig.periodogram(xs_new)
    f2, pxx_den2 = sci_sig.periodogram(xs)
    plt.semilogy(f, pxx_den)
    plt.semilogy(f2, pxx_den2)
    plt.show()
def harmonicradar_cw(t, fs, fc):
    ttxt = 'CW: {} Hz'.format(fc)
    # %% input
    x = sin(2*pi*fc*t)
    _, Pxx = periodogram(x, fs)
    # %% diode
    d = square(2*pi*fc*t)
    d[d < 0] = 0.
    # %% output of diode
    y = x * d
    # y = x; y[y<0] = 0.  # shorthand way to say it, same result
    fax, Pyy = periodogram(y, fs)
    # %% results
    plotlf(t, x, d, y, ttxt)
    plots(fax, Pxx, Pyy, fc, ttxt)
def plot_psd_welch(data, fs, filename, flag):
    # Estimate the PSD using Welch's method: divide the data into overlapping segments,
    # compute a modified periodogram for each segment, and average the periodograms.
    plt.figure(figsize=(10, 10))
    fig, (ax0, ax1) = plt.subplots(nrows=2)
    f, Pxx_den = signal.periodogram(data, fs)
    ax0.set_xlabel('frequency [Hz]')
    ax0.set_ylabel('periodogram')
    ax0.plot(f, Pxx_den)
    print("Periodogram")
    print("f is", f)
    print("Pxx_den is", Pxx_den)
    f2, Pxx_den2 = signal.welch(data, fs)
    ax1.set_ylabel('PSD [V**2/Hz]')
    print("Welch method")
    print("f2 is", f2)
    print("Pxx_den2 is", Pxx_den2)
    ax1.plot(f2, Pxx_den2)
    if flag == 1:
        ax0.set_yscale('log')
        ax1.set_yscale('log')
        ax0.set_ylabel('periodogram (log scale)')
        ax1.set_ylabel('Welch\'s PSD [V**2/Hz] (log scale)')
    plt.savefig(filename + '.pdf')
def periodogramPlot(ySeries, plotName="Plot", xAxisName="Frequency", yAxisName="Frequency Strength"):
    trans = signal.periodogram(ySeries)
    plt.title(plotName)
    plt.xlabel(xAxisName)
    plt.ylabel(yAxisName)
    plt.plot(trans[0], trans[1], color='green')
    plt.show()
def plot_pulser_ffts(wfFileName):
    if os.path.isfile(wfFileName):
        print("Loading wf file {0}".format(wfFileName))
        data = np.load(wfFileName, encoding="latin1")
        wfs = data['wfs']

    avg_count = 0
    for wf in wfs:
        wf.window_waveform(time_point=0.95, early_samples=125, num_samples=1000)
        wf_data = wf.windowed_wf / wf.amplitude
        # plt.plot(wf_data)
        top_idx = 140  # np.argmax(wf_data > 1000) + 15
        wf_top = wf_data[top_idx:top_idx + 800]
        xf, power = signal.periodogram(wf_top, fs=1E8, detrend="linear")
        plt.semilogx(xf, power)
        # avg_wf += wf_top
        # avg_count += 1
        # if avg_count >= pulser_count: break

    plt.xlim(10**6, 0.5 * 10**8)
    plt.show()
def analyzeBinergy(grain):
    rate, data = wav.read(grain["file"])
    numBins = 20
    data = numpy.array(data, dtype=float)
    # data = data * numpy.hanning(float(grain["frameCount"]))
    f, Pxx_den = signal.periodogram(data, fs=rate, window='hanning',
                                    return_onesided=True, scaling='spectrum', axis=-1)
    if len(Pxx_den) < 50:
        return None
    try:
        Pxx_den = 10 * numpy.log10(Pxx_den)
    except Exception:
        return None
    binWidth = len(Pxx_den) // numBins  # integer bin width so it can be used as an index range
    energy = [0] * numBins
    for binNum in range(numBins):
        for binIndex in range(binWidth):
            if binIndex <= (binWidth // 2):
                slope = 1 / float(binWidth // 2)
                energy[binNum] += Pxx_den[binIndex + (binNum * binWidth)] * (slope * binIndex)
            else:
                slope = -1 / float(binWidth // 2)
                energy[binNum] += Pxx_den[binIndex + (binNum * binWidth)] * (slope * (binIndex - (binWidth // 2)))
    return energy
def PlotPowerSpectrum(electrode_slices, exact_sr, freq_min=0, freq_max=100,
                      mode='welch', name='', save_path=""):
    """Plot average and individual traces of the power spectrum for signal epochs.

    Parameters
    ----------
    electrode_slices : dict
        Key is an event name, values are signal epochs.
    exact_sr : float
        Exact sampling rate reported by the EEG amplifier.
    freq_min, freq_max : int, optional
        Lower and upper frequency limits for plotting (default 0 and 100 Hz).
    mode : {'period', 'welch'}
        Default is 'welch'. Change to 'period' to estimate power with a plain periodogram.
    name : str, optional
        Title of the figure.

    Returns
    -------
    power_density : dict
        Keys are event names. Under each key there are two np.arrays: one stores the
        frequency bins, the other the power densities of all trials separately.
    """
    # sns.set()
    # sns.set_palette("hls")
    fig, axes = plt.subplots(1)
    fig.suptitle(name)
    print(name)
    palette = itertools.cycle(sns.color_palette())
    power_density = {}
    for name, event in electrode_slices.items():
        if mode == 'welch':
            f, Pxx_den = signal.welch(event, exact_sr, nperseg=512)
        elif mode == 'period':
            f, Pxx_den = signal.periodogram(event, exact_sr)
        min_idx = np.argmax(f > freq_min)
        max_idx = np.argmax(f > freq_max)
        g = sns.tsplot(data=Pxx_den[:, min_idx:max_idx], time=f[min_idx:max_idx],
                       err_style="unit_traces", condition=name,
                       color=next(palette), ax=axes)
        # g.fig.suptitle()
        axes.set_yticklabels(labels=f[min_idx:max_idx], rotation=0)
        axes.set_ylabel(mode + ' Power Density')
        axes.set_xlabel('frequency')
        power_density[name] = (f[min_idx:max_idx], Pxx_den[:, min_idx:max_idx])
    if save_path != "":
        fig.savefig(save_path)
    return power_density
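# A minimal usage sketch for PlotPowerSpectrum (not from the original code): two hypothetical
# event types with 20 random epochs each at a 500 Hz sampling rate. It assumes the module-level
# imports the function relies on (numpy, matplotlib.pyplot, seaborn, itertools, scipy.signal)
# are already in place.
import numpy as np

sr = 500.0
n_trials, n_samples = 20, int(2 * sr)          # 20 epochs of 2 s each
electrode_slices = {
    'target':   np.random.randn(n_trials, n_samples),
    'standard': np.random.randn(n_trials, n_samples),
}

# Welch estimate between 1 and 40 Hz; returns {event: (freq_bins, per-trial PSDs)}
power = PlotPowerSpectrum(electrode_slices, exact_sr=sr,
                          freq_min=1, freq_max=40, mode='welch', name='C3')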
def periodogram(x, *args, detrend='diff', **kwargs):
    """
    Return the periodogram of signal `x`.

    Parameters
    ----------
    x : array_like
        A 1D signal.
    detrend : 'diff' or False or int
        Remove trend from x. If int, fit and subtract a polynomial of this
        order. See also: `statsmodels.tsa.detrend`.
    args, kwargs
        As accepted by `scipy.signal.periodogram`.

    Returns
    -------
    periods : array_like
        The periods at which the spectral density is calculated.
    pgram : array_like
        Power spectral density of x.
    """
    from scipy.signal import periodogram
    x = _detrend(x, detrend)
    freqs, pgram = periodogram(x, *args, detrend=False, **kwargs)

    SKIP = len(x) // 1000  # HACK: For long series, the first few frequency/period values are "unstable".
    freqs, pgram = freqs[SKIP:], pgram[SKIP:]

    periods = 1 / freqs
    periods, pgram = _significant_periods(periods, pgram)
    return periods, pgram
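# A short usage sketch for this wrapper (not from the original code). The synthetic series is
# illustrative, and it assumes the wrapper is called from the module where its private helpers
# _detrend and _significant_periods are defined.
import numpy as np

t = np.arange(2000)
x = np.sin(2 * np.pi * t / 50) + 0.1 * np.random.randn(t.size)  # dominant period of 50 samples

periods, pgram = periodogram(x, detrend=False)
print(periods[np.argmax(pgram)])   # expected to be close to 50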
def update(self):
    if self.lfp_dock.isVisible() or self.fft_dock.isVisible():
        if self.fft_l < S_CONFIG['FFT_L_PAQ']:
            self.data_fft_aux[self.fft_l*CONFIG['PAQ_USB']:(1+self.fft_l)*CONFIG['PAQ_USB']] = \
                self.data_handler.data_new[self.channel, :]
            self.fft_l += 1
        else:
            self.fft_l = 0
            if self.fft_n < S_CONFIG['FFT_N']:
                self.fft_frec, self.fft_aux[self.fft_n, :] = periodogram(
                    self.data_fft_aux, fs=float(CONFIG['FS']), nfft=FFT_SIZE, scaling='spectrum')
                self.fft_n += 1
            else:
                self.fft_n = 0
            spectrum = np.mean(self.fft_aux, 0)
            if self.fft_dock.isVisible():
                self.Scurve.setPen(CH_COLORS[self.channel % CONFIG['ELEC_GROUP']])
                self.Scurve.setData(x=self.fft_frec, y=spectrum)
            if self.lfp_dock.isVisible():
                fo = self.fft_frec[0]
                df = self.fft_frec[1] - self.fft_frec[0]
                for i in self.x_lfp:
                    self.seg_rel[i] = S_SEGMENTS[i].calc_S(fo, df, spectrum)
                self.LFPbars.setOpts(x=self.x_lfp, width=0.8, y0=0,
                                     y1=self.seg_rel/spectrum.sum(),
                                     brushes=LFP_COLORS, pens=LFP_COLORS)
def plot_periodogram(d, sampling_freq, col='b'):
    f, p = signal.periodogram(d, sampling_freq, scaling='spectrum')
    db = numpy.log10(p) * 10
    plot.ylim([-180, 0])
    plot.xlim([0, 22000])
    # plot.xscale('log')
    plot.plot(f, db, col)
def debugfunc():
    # threshold - 3e7 ?
    Fs, arr = wav.read('music/2khzsine.wav')
    dt = 1.0 / Fs
    print(Fs)
    print(dt)
    print(arr.shape)
    fourier = np.fft.fft(arr)
    N = arr.size
    print(N)
    freq = np.fft.fftfreq(N, dt)
    idx = np.argsort(freq)
    # pyplt.plot(freq[idx], np.abs(fourier[idx]))
    # pyplt.show()
    # threshold - 15000?
    B, A = signal.butter(2, 2000 / (Fs / 2.0), btype='lowpass', analog=False, output='ba')
    arrfilt = signal.filtfilt(B, A, arr)
    fig = pyplt.figure()
    ax1 = fig.add_subplot(211)
    time = np.arange(0, 1.0 * arr.size / Fs, dt)
    print(time.shape)
    print(arr.shape)
    print(arrfilt.shape)
    # pyplt.plot(time[0:100], arr[0:100], 'b-')
    # pyplt.plot(time[0:100], arrfilt[0:100], 'r-')
    # pyplt.show()
    # threshold - 11300?
    f, pxx_spec = signal.periodogram(arr, Fs, 'flattop', scaling='spectrum')
    print(arr.max())
    print(np.sqrt(pxx_spec.max()))
def do(self):
    self.time = time.strftime('%Y-%m-%d_%H%M%S')
    self.timestamp = time.strftime("%Y-%m-%d @ %I:%M:%S%p")
    self.setup_preamp()
    Nfft = int((self.measure_freq * self.measure_time / 2) + 1)
    psdAve = np.zeros(Nfft)
    for i in range(self.averages):
        self.V, self.t = self.daq.monitor('ai%i' % self.input_chan,
                                          self.measure_time, self.measure_freq)
        self.V = self.V['ai%i' % self.input_chan]  # extract data from the required channel
        self.f, psd = signal.periodogram(self.V, self.measure_freq, 'blackmanharris')
        psdAve = psdAve + psd
    psdAve = psdAve / self.averages  # normalize by the number of averages
    self.psdAve = np.sqrt(psdAve)  # spectrum in V/sqrt(Hz)
    self.notes = input('Notes for this spectrum: ')
    self.setup_plots()
    self.plot_loglog()
    self.plot_semilog()
    self.save()
def test_integer_twosided(self):
    x = np.zeros(16, dtype=int)
    x[0] = 1
    f, p = periodogram(x, return_onesided=False)
    assert_allclose(f, fftpack.fftfreq(16, 1.0))
    q = np.ones(16)/16.0
    q[0] = 0
    assert_allclose(p, q)
def spectrum_fbm():
    fbm = np.load("quick_fbm.npy")
    f, Pxx = sci_sig.periodogram(fbm)
    plt.close()
    plt.loglog(f, np.sqrt(Pxx))
    plt.xlabel("frequency")
    plt.ylabel("PSD")
    plt.savefig("spectrum_fbm", bbox_inches="tight")
def plotperiodogram(t: np.ndarray, x: np.ndarray, fs: int, ax, ttxt):
    fax, Pxx = periodogram(x, fs, 'hanning')
    ax[0].plot(fax, 10*np.log10(abs(Pxx)))
    ax[0].set_title(ttxt)
    ax[1].plot(t, x)
    ax[1].set_ylim(-1, 1)
    ax[1].set_xlabel('time [sec.]')
def spectrum_vr():
    vr = open_vr()
    f, Pxx = sci_sig.periodogram(vr)
    plt.close()
    plt.loglog(f, np.sqrt(Pxx))
    plt.xlabel("frequency")
    plt.ylabel("PSD")
    plt.savefig("spectrum_vr", bbox_inches="tight")
def spectrum_sinusoid():
    sinusoid = np.sin(np.linspace(0, 40 * np.pi, 1500))
    f, Pxx = sci_sig.periodogram(sinusoid)
    plt.close()
    plt.loglog(f, np.sqrt(Pxx))
    plt.xlabel("frequency")
    plt.ylabel("PSD")
    plt.savefig("spectrum_sinusoid", bbox_inches="tight")
def spectrum_logistic():
    logit = gen_logistic_map(1500)
    f, Pxx = sci_sig.periodogram(logit)
    plt.close()
    plt.loglog(f, np.sqrt(Pxx))
    plt.xlabel("frequency")
    plt.ylabel("PSD")
    plt.savefig("spectrum_logistic", bbox_inches="tight")
def test_complex(self):
    x = np.zeros(16, np.complex128)
    x[0] = 1.0 + 2.0j
    f, p = periodogram(x, return_onesided=False)
    assert_allclose(f, fftpack.fftfreq(16, 1.0))
    q = 5.0*np.ones(16)/16.0
    q[0] = 0
    assert_allclose(p, q)
def test_integer_odd(self):
    x = np.zeros(15, dtype=int)
    x[0] = 1
    f, p = periodogram(x)
    assert_allclose(f, np.arange(8.0)/15.0)
    q = np.ones(8)
    q[0] = 0
    q *= 2.0/15.0
    assert_allclose(p, q, atol=1e-15)
def plot_distanceintegral(savename, plotname, rmcenter=False, onlygreen=False): if os.path.exists(savename): with open(savename, 'rb') as savefile: area = pickle.load(savefile) else: # For samping over the absolute magnitude distribution iso = gaia_rc.load_iso() Gsamples = gaia_rc.sample_Gdist(iso, n=_NGSAMPLES) # l and b of the pixels theta, phi = healpy.pixelfunc.pix2ang( _NSIDE, numpy.arange(healpy.pixelfunc.nside2npix(_NSIDE)), nest=False) cosb = numpy.sin(theta) area= multi.parallel_map(lambda x: distanceIntegrand(\ dust._GREEN15DISTS[x],cosb,Gsamples,rmcenter,onlygreen), range(len(dust._GREEN15DISTS)), numcores=numpy.amin([16, len(dust._GREEN15DISTS), multiprocessing.cpu_count()])) save_pickles(savename, area) # Plot the power spectrum if True: psdx, psd = signal.periodogram( area * dust._GREEN15DISTS**3. / numpy.sum(area * dust._GREEN15DISTS**3.), fs=1. / (dust._GREEN15DISTMODS[1] - dust._GREEN15DISTMODS[0]), detrend=lambda x: x, scaling='spectrum') bovy_plot.bovy_print(fig_height=3.) matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{yfonts}"] bovy_plot.bovy_plot(psdx[1:], psd[1:], 'k-', loglog=True, xlabel=r'$2\pi\,k_\mu\,(\mathrm{mag}^{-1})$', ylabel=r'$P_k$', xrange=[0.04, 4.]) bovy_plot.bovy_text( r'$\mathrm{normalized}\ D^3\,\nu_*(\mu|\theta)\,\textswab{S}(\mu)$', bottom_left=True, size=16.) bovy_plot.bovy_end_print(plotname) else: bovy_plot.bovy_print(fig_height=3.) matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{yfonts}"] bovy_plot.bovy_plot( dust._GREEN15DISTMODS, area * dust._GREEN15DISTS**3., 'k-', xlabel=r'$\mu\,(\mathrm{mag}^{-1})$', ylabel=r'$D^3\,\nu_*(\mu|\theta)\,\textswab{S}(\mu)$') bovy_plot.bovy_end_print(plotname) spl = interpolate.InterpolatedUnivariateSpline(dust._GREEN15DISTMODS, area * dust._GREEN15DISTS**3., k=5) fthder = [spl.derivatives(dm)[4] for dm in dust._GREEN15DISTMODS] print "Simpson error= %g, volume= %g" % ( 0.5**4. / 180. * numpy.mean(numpy.fabs(fthder)) / integrate.simps(area * dust._GREEN15DISTS**3., dx=0.5), numpy.sum(area * dust._GREEN15DISTS**3.)) return None
# the mean
mean = 0
# the alpha
α = 0.3
# the standard deviation
std = (1 - α**2)**0.5
Y[0] = numpy.random.normal(mean, std, size=1)
# set up an initial iteration variable
i = 0
while i <= N - 1:
    X = numpy.random.normal(mean, std, size=1)
    Y[i + 1] = α * Y[i] + X
    i = i + 1
# times of sampling
samp = 400
freqs, P_xx = signal.periodogram(Y, samp, scaling='density')
# plot the graph
plt.plot(freqs, P_xx)
plt.show()

# Problem 1 - (c)
# a = 0.95
import numpy
import matplotlib.pyplot as plt
from scipy import signal

# set up the limit for the problem
N = 300
# set up an all-zero variable
Y = numpy.zeros(N + 1)
# the mean
        yout[i, Nmitral:] = cellout(y[i, Nmitral:], Sy, Sy2, th)

    # first, reinitialize lastnoise & noise
    noise = np.zeros((Ndim, 1))
    lastnoise = np.zeros((Ndim, 1))
    lastnoise = lastnoise + t_inh - noisewidth

    Sh = np.zeros(np.shape(yout))
    for i in np.arange(Ndim):
        Sh[:, i] = np.convolve(yout[:, i], hpflter, mode='same')

    # Calculate IPR
    P_den = np.zeros((198, Ndim))
    for i in np.arange(Nmitral):
        # periodogram returns a list of frequencies and the power density
        f, P_den[:, i] = signal.periodogram(Sh[:, i])
    psi = np.zeros(Nmitral)
    for i in np.arange(Nmitral):
        psi[i] = np.sum(P_den[:, i])
    psi = psi / np.sqrt(np.sum(psi**2))
    IPR1 = 1 / np.sum(psi**4)
    pwr = np.sum(P_den) / Nmitral

    updated = 0
    ###############Start Updating#########################################
    while pwr < .2:
        H0trial = np.copy(H0)
def dmg_seed_50_2D(colnum): #INITIALIZING STUFF Nmitral = 50 Ngranule = np.copy(Nmitral) #number of granule cells pg. 383 of Li/Hop Ndim = Nmitral + Ngranule #total number of cells # t_inh = 25 ; # time when inhalation starts # t_exh = 205; #time when exhalation starts # Ndamagetotal = Nmitral*2 + 1 #number of damage steps Ndamage = 18 # Ncols = int(Nmitral/10) #define number of columns to damage finalt = 395 # end time of the cycle #y = zeros(ndim,1); P_odor0 = np.zeros((Nmitral, 1)) #odor pattern, no odor P_odor1 = P_odor0 + .00429 #Odor pattern 1 # P_odor2 = 1/70*np.array([.6,.5,.5,.5,.3,.6,.4,.5,.5,.5]) # P_odor3 = 4/700*np.array([.7,.8,.5,1.2,.7,1.2,.8,.7,.8,.8]) #control_odor = control_order + .00429 #control_odor = np.zeros((Nmitral,1)) #odor input for adaptation #controllevel = 1 #1 is full adaptation H0 = np.zeros((Nmitral, Ngranule)) #weight matrix: to mitral from granule W0 = np.zeros((Ngranule, Nmitral)) #weights: to granule from mitral H0 = np.load('H0_50_2D_60Hz.npy') #load weight matrix W0 = np.load('W0_50_2D_60Hz.npy') #load weight matrix W0_tot = np.sum(W0) #get sum of the weights #H0 = H0 + H0*np.random.rand(np.shape(H0)) #W0 = W0+W0*np.random.rand(np.shape(W0)) M = 5 #average over 5 trials for each level of damage #initialize iterative variables d1it, d2it, d3it, d4it = np.zeros(M), np.zeros(M), np.zeros(M), np.zeros(M) IPRit, IPR2it, pnit = np.zeros(M), np.zeros(M), np.zeros(M) frequencyit = np.zeros(M) pwrit, pwr_bit, pwr_gamit = np.zeros(M), np.zeros(M), np.zeros(M) yout2, Sh2 = np.zeros((finalt, Ndim)), np.zeros((finalt, Ndim)) # Sbp_b,Sbp_gam = np.zeros((finalt,Ndim)),np.zeros((finalt,Ndim)) psi = np.copy(Sh2[:, :Nmitral]) #initialize quantities to be returned at end of the process dmgpct1 = np.zeros(Ndamage) eigfreq1 = np.zeros(Ndamage) d11 = np.zeros(Ndamage) d21 = np.zeros(Ndamage) d31 = np.zeros(Ndamage) d41 = np.zeros(Ndamage) pwr1 = np.zeros(Ndamage) # pwr_b,pwr_gam = np.zeros(Ndamage),np.zeros(Ndamage) IPR1 = np.zeros(Ndamage) IPR2 = np.zeros(Ndamage) pn1 = np.zeros(Ndamage) freq1 = np.zeros(Ndamage) cell_act = np.zeros((finalt, Ndim, Ndamage)) Wdamagestep = .1 * W0 Wdamaged = np.copy(W0) # spread = -1 #start at -1 so that the first damage level has a spread of 0 radius damage = 0 dmgcols = [colnum] #Get the base response first Omean1,Oosci1,Omeanbar1,Ooscibar1 = np.zeros((Nmitral,M))+0j,\ np.zeros((Nmitral,M))+0j,np.zeros(M)+0j,np.zeros(M)+0j for m in np.arange(M): yout,y0out,Sh,t,OsciAmp1,Omean1[:,m],Oosci1[:,m],Omeanbar1[m],\ Ooscibar1[m],freq0,maxlam = olf_bulb_10(Nmitral,H0,W0,P_odor1) for lv in np.arange(Ndamage): #reinitialize all iterative variables to zero (really only need to do for distance measures, but good habit) d1it, d2it, d3it, d4it = np.zeros(M), np.zeros(M), np.zeros( M), np.zeros(M) IPRit, IPR2it, pnit = np.zeros(M), np.zeros(M), np.zeros(M) frequencyit = np.zeros(M) pwrit = np.zeros(M) if lv > 0: if lv > 1: newcols = [] for i in dmgcols: newcols.extend([int(np.mod(i+1,5)),int(np.mod(i-1,5)),\ int(np.mod(i+5,Nmitral)),int(np.mod(i-5,Nmitral))]) for n in newcols: if not (n in dmgcols): dmgcols.extend([n]) Wdamaged[:, dmgcols] = Wdamaged[:, dmgcols] - Wdamagestep[:, dmgcols] Wdamaged[Wdamaged < 0] = 0 damage = np.sum(W0 - Wdamaged) for m in np.arange(M): #Then get respons of damaged network yout2[:,:],y0out2,Sh2[:,:],t2,OsciAmp2,Omean2,Oosci2,Omeanbar2,\ Ooscibar2,freq2,grow_eigs2 = olf_bulb_10(Nmitral,H0,Wdamaged,P_odor1) #calculate distance measures print(time.time() - tm1) for i in np.arange(M): d1it[m] += 1 - Omean1[:, m].dot(Omean2) / ( 
lin.norm(Omean1[:, m]) * lin.norm(Omean2)) d2it[m] += 1 - lin.norm(Oosci1[:, m].dot(np.conjugate( Oosci2))) / (lin.norm(Oosci1[:, m]) * lin.norm(Oosci2)) d3it[m] += (Omeanbar1[m] - Omeanbar2) / (Omeanbar1[m] + Omeanbar2) d4it[m] += np.real( (Ooscibar1[m] - Ooscibar2) / (Ooscibar1[m] + Ooscibar2)) d1it[m] = d1it[m] / M d2it[m] = d2it[m] / M d3it[m] = d3it[m] / M d4it[m] = d4it[m] / M #calculate spectral density and "wave function" to get average power and IPR P_den = np.zeros( (501, Nmitral)) #only calculate the spectral density from for i in np.arange( Nmitral): #t=125 to t=250, during the main oscillations f, P_den[:, i] = signal.periodogram(Sh2[125:250, i], nfft=1000, fs=1000) psi = np.zeros(Nmitral) for p in np.arange(Nmitral): psi[p] = np.sum(P_den[:, p]) psi = psi / np.sqrt(np.sum(psi**2)) # P_den_b = np.zeros((501,Nmitral)) # for i in np.arange(Nmitral): # f, P_den_b[:,i] = signal.periodogram(Sbp_b[125:250,i],nfft=1000,fs=1000) #periodogram returns a list of # #frequencies and the power density # P_den_gam = np.zeros((501,Nmitral)) # for i in np.arange(Nmitral): # f, P_den_gam[:,i] = signal.periodogram(Sbp_gam[125:250,i],nfft=1000,fs=1000) #periodogram returns a list of # #frequencies and the power density psi2 = np.copy(OsciAmp2) psi2 = psi2 / np.sqrt(np.sum(psi2**2)) maxAmp = np.max(OsciAmp2) pnit[m] = len(OsciAmp2[OsciAmp2 > maxAmp / 2]) IPRit[m] = 1 / np.sum(psi**4) IPR2it[m] = 1 / np.sum(psi2**4) pwrit[m] = np.sum(P_den) / Nmitral # pwr_bit[m] = np.sum(P_den_b)/Nmitral # pwr_gamit[m] = np.sum(P_den_gam)/Nmitral #get the frequency according to the adiabatic analysis maxargs = np.argmax(P_den, axis=0) argf = stats.mode(maxargs[maxargs != 0]) frequencyit[m] = f[argf[0][0]] # print(cols) # print(time.time()-tm1) # # print('level',lv) #Get the returned variables for each level of damage dmgpct1[lv] = damage / W0_tot IPR1[lv] = np.average(IPRit) pwr1[lv] = np.average(pwrit) # pwr_b[lv] = np.average(pwr_bit) # pwr_gam[lv] = np.average(pwr_gamit) freq1[lv] = np.average(frequencyit) #0,1,2,3,4...Ndamage-1, then #col 1 damage level 0,1,2... # IPRsd[lv]=np.std(IPRit) # pwrsd[lv]=np.std(pwrit) # freqsd[lv]=np.std(frequencyit) IPR2[lv] = np.average(IPR2it) pn1[lv] = np.average(pnit) d11[lv] = np.average(d1it) d21[lv] = np.average(d2it) d31[lv] = np.average(d3it) d41[lv] = np.average(d4it) # d1sd[lv] = np.std(d1it) # d2sd[lv] = np.std(d2it) # d3sd[lv]=np.std(d3it) # d4sd[lv]=np.std(d4it) eigfreq1[lv] = np.copy(freq2) if arrayid == 0 or int(Nmitral / 2): cell_act[:, :, lv] = np.copy(yout2) return dmgpct1, eigfreq1, d11, d21, d31, d41, pwr1, IPR1, IPR2, pn1, freq1, cell_act
def pwr_spec(tvar, nbp=256, nsp=128, name=None):
    """
    Calculates the power spectrum of a line, and adds a tplot variable for this new spectrogram.

    Parameters:
        tvar : str
            Name of tvar to use
        nbp : int, optional
            The number of points to use when calculating the FFT
        nsp : int, optional
            The number of points to shift over to calculate the next FFT
        name : str, optional
            The name of the new tplot variable created

    Returns:
        None

    Examples:
        >>> pytplot.cdf_to_tplot("/path/to/pytplot/testfiles/mvn_euv_l2_bands_20170619_v09_r03.cdf")
        >>> pytplot.tplot_math.split_vec('data')
        >>> pytplot.pwr_spec('data_0')
        >>> pytplot.tplot('data_0_pwrspec')
    """
    x = pytplot.data_quants[tvar].coords['time']
    y = pytplot.data_quants[tvar].values.squeeze()

    if len(y.shape) > 1:
        print("Can only perform action for a single line")

    l = len(x)
    x_new = []
    f_new = []
    pxx_new = []
    shift_lsp = np.arange(0, l - 1, nsp)

    for i in shift_lsp:
        x_n = x[i:i + nbp]
        y_n = y[i:i + nbp]
        if len(x_n) < nbp:
            continue

        median_diff_between_points = np.median(np.diff(x_n))

        w = signal.get_window("hanning", nbp)
        f, pxx = signal.periodogram(y_n, fs=(1 / median_diff_between_points), window=w, detrend='linear')
        f = f[1:-1]
        pxx = pxx[1:-1]
        x_new.append((x_n[-1] + x_n[0]) / 2)
        f_new.append(f)
        pxx_new.append(pxx)

    if name is None:
        name = tvar + "_pwrspec"

    pytplot.store_data(name, data={'x': x_new, 'y': pxx_new, 'v': f_new})
    pytplot.options(name, 'spec', 1)
    pytplot.options(name, 'zlog', 1)
    pytplot.options(name, 'ylog', 1)
    return
import sunpy.timeseries
from sunpy.data.sample import RHESSI_TIMESERIES

###############################################################################
# Let's first load a RHESSI TimeSeries from sunpy's sample data.
# This data contains 9 columns, which are evenly sampled with a time step of 4
# seconds.

ts = sunpy.timeseries.TimeSeries(RHESSI_TIMESERIES)

###############################################################################
# We now use SciPy's `~scipy.signal.periodogram` to estimate the
# power spectra of the first column of the TimeSeries. The first column contains
# X-ray emissions in the range of 3-6 keV. An alternative version is Astropy's
# `~astropy.timeseries.LombScargle` periodogram.

x_ray = ts.columns[0]
# The suitable value for fs would be 0.25 Hz, as the time step is 4 s.
freq, spectra = signal.periodogram(ts.quantity(x_ray), fs=0.25)

###############################################################################
# Let's plot the results.

plt.figure()
plt.semilogy(freq, spectra)
plt.title(f'Power Spectrum of {x_ray}')
plt.ylabel('Power Spectral Density [{:LaTeX}]'.format(ts.units[x_ray] ** 2 / u.Hz))
plt.xlabel('Frequency [Hz]')
plt.show()
def getPeriodogramm_Estimation_4_CXX(signal, sampling):
    f_period, Pxx_den_period = sg.periodogram(signal, sampling, scaling='spectrum')
    return f_period, Pxx_den_period
def extract(self, x):
    return periodogram(x, fs=self.sampling_frequency, window='hann')[1]
def power_spectrum(input, output, time_step_size, method="scipyffthalf", o_i="oi"):
    """
    Compute the power spectral density estimate of a time series.

    By default, the output spectrum is divided by the input spectrum. You can
    choose between three methods: 'scipyffthalf' and 'scipyperio' give almost
    exactly the same results, while 'scipywelch' computes a smoothed periodogram.

    Parameters
    ----------
    input : 1D array, list
        Time series of an input process of e.g. an LTI system. If considering an
        aquifer as the filter of the LTI system, the input signal would be the
        recharge time series of the aquifer.
    output : 1D array, list
        Time series of an output process of e.g. an LTI system. If considering an
        aquifer as the filter of the LTI system, the output signal would be the
        head time series of the aquifer.
    time_step_size : integer
        The size of the time step between every data point, in seconds.
    method : string, Default: 'scipyffthalf'
        Method used to derive the spectrum.
        'scipyffthalf'
            Periodogram from the squared FFT magnitude: abs(X(w))^2
            http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        'scipywelch'
            scipy.signal.welch
            https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html#r145
        'scipyperio'
            scipy.signal.periodogram
            https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.periodogram.html
    o_i : string
        'oi' : output spectrum will be divided by input spectrum
        'i'  : only input spectrum will be returned
        'o'  : only output spectrum will be returned

    Returns
    -------
    frequency_xx : 1D array
        Corresponding frequencies of the Fourier transform.
    power_spectrum_xx : 1D array
        Power spectrum of the time series.

    Suggested Improvements
    ----------------------
    - Make input an optional argument! New structure: every method is a function.
""" import numpy as np if np.shape(input) != np.shape(output) and o_i == "oi": raise ValueError("x and y must have same length.") if np.asarray(input).ndim != 1: raise ValueError("x and y must have dimension = 1.") len_input = len(input) len_output = len(output) # define the sampling frequency/time step # ------------------------------------------------------------------------- sampling_frequency = 1.0 / time_step_size # [Hz] second: 1, day: 1.1574074074074E-5 # methodologies for power spectral density # ------------------------------------------------------------------------- if method == "scipyffthalf_russian": import scipy.fftpack as fftpack # first value was popped because frequencies are very low (=0) and cause errors while fitting power_spectrum_input = fftpack.fft(input) power_spectrum_output = fftpack.fft(output) if len_input == len_output: power_spectrum_result = power_spectrum_output / power_spectrum_input power_spectrum_result = abs(power_spectrum_result[:int( round(len(power_spectrum_result) / 2))])**2 power_spectrum_result = power_spectrum_result[1:] frequency_input = (abs(fftpack.fftfreq( len_input, time_step_size))[:int(round(len_output / 2))])[1:] frequency_output = (abs(fftpack.fftfreq( len_output, time_step_size))[:int(round(len_output / 2))])[1:] elif method == "scipyffthalf": import scipy.fftpack as fftpack # first value was popped because frequencies are very low (=0) and cause errors while fitting spectrum = fftpack.fft(input) spectrum = abs(spectrum[:int(round(len(spectrum) / 2))])**2 power_spectrum_input = spectrum[1:] spectrum = fftpack.fft(output) spectrum = abs(spectrum[:int(round(len(spectrum) / 2))])**2 power_spectrum_output = spectrum[1:] if len_input == len_output: power_spectrum_result = power_spectrum_output / power_spectrum_input frequency_input = (abs(fftpack.fftfreq( len_input, time_step_size))[:int(round(len_input / 2))])[1:] frequency_output = (abs(fftpack.fftfreq( len_output, time_step_size))[:int(round(len_output / 2))])[1:] elif method == "scipywelch": from scipy import signal nperseg = int(round(len(input) / 10)) frequency_input, power_spectrum_input = signal.welch( input, sampling_frequency, nperseg=nperseg, window="hamming") frequency_output, power_spectrum_output = signal.welch( output, sampling_frequency, nperseg=nperseg, window="hamming") if len_input == len_output: power_spectrum_result = power_spectrum_output / power_spectrum_input elif method == "scipyperio": from scipy import signal frequency_input, power_spectrum_input = signal.periodogram( input, fs=sampling_frequency) frequency_output, power_spectrum_output = signal.periodogram( output, fs=sampling_frequency) frequency_output = frequency_output[1:] frequency_input = frequency_input[1:] power_spectrum_input = power_spectrum_input[1:] power_spectrum_output = power_spectrum_output[1:] if len_input == len_output: power_spectrum_result = power_spectrum_output / power_spectrum_input else: print("Method not valid.") if o_i == "i": return np.asarray(frequency_input), np.asarray(power_spectrum_input) # return frequency_input, power_spectrum_input elif o_i == "o": return np.asarray(frequency_output), np.asarray(power_spectrum_output) # return frequency_input, power_spectrum_input elif o_i == "oi": return np.asarray(frequency_input), np.asarray(power_spectrum_result)
def power_density(f, A, f0):
    return A / (f**2 / f0 + f0)


name = os.getcwd().split('/')[-1]

with open(f'results/results_with_force_{name}.txt', 'w') as file:
    file.write(f'#Amplitudes and roll-off frequencies for laser power {name} \n')

# ANALYZE DATA WITH FORCE IN X DIRECTION
x, y, sum = np.genfromtxt(f'{name}_x.dat', unpack=True)
xcal, ycal = np.genfromtxt(f'zcalibration.txt', unpack=True)
x /= xcal
y /= ycal

fs = 10e3
f, Pxx_den = signal.periodogram(x - np.mean(x), fs)

dt = 1 / fs
t = np.arange(0, len(x)*dt, step=dt)

fig = plt.figure(figsize=(5.7, 7.8))
ax1 = fig.add_subplot(411)
ax1.plot(t, x, label='Data')
ax1.set_xlabel(r'$t / \si{\second}$')
ax1.set_ylabel(r'$x / \si{\micro\meter}$')
ax1.set_xlim(t[0], t[-1])
ax1.legend(loc='lower left')
ax1.text(0.03, 0.96, r'(a)', horizontalalignment='left', verticalalignment='top',
         transform=ax1.transAxes, color='black')
# fig.savefig(f'xplot_{name}.pdf', bbox_inches='tight', pad_inches=0)

ax2 = fig.add_subplot(412)
plt.semilogx(wfir / np.pi * nyquist, np.unwrap(np.angle(hfir)), 'r')
plt.xlabel('Frequency, 1/day')
plt.ylabel('Phase shift, degree')
plt.xlim([1 / (N * dt), nyquist])

plt.subplot(4, 1, 3)
plt.plot(t, y, 'k')
plt.plot(t, yfilt, 'b')
plt.plot(t, yfiltfir, 'r')
plt.xlabel('time, day')
plt.ylabel('signal')
plt.legend(['Unfiltered', 'Butterworth filter', 'FIR filter'])

plt.subplot(4, 1, 4)
# calculate spectral power density
f, pow = sig.periodogram(y, fs)
f, powfilt = sig.periodogram(yfilt, fs)
f, powfiltfir = sig.periodogram(yfiltfir, fs)
plt.loglog(f, pow, 'k')
plt.plot(f, powfilt, 'b')
plt.plot(f, powfiltfir, 'r')
plt.ylim([1e-6, 1e3])
plt.xlim([1 / (N * dt), nyquist])
plt.xlabel('frequency, 1/day')
plt.ylabel('power density')
plt.legend(['Unfiltered', 'Butterworth filter', 'FIR filter'], loc='upper center')
plt.text(1 / 7, 10, '7 day cycle')
plt.text(1 / 365, 10, '365-day cycle')
plt.tight_layout()
def __load__(self): tic = time.time() # get list of days using distinct(utc_time_day) from table accel_mod try: con = psycopg2.connect(database = self.db, user = self.user, password = self.password) cur = con.cursor() command = f"SELECT distinct(utc_time_day) FROM accel_mod ORDER BY utc_time_day;" df_day_list = pd.read_sql_query(command, con) day_list = list(df_day_list['utc_time_day']) con.commit() except psycopg2.DatabaseError as e: if con: con.rollback() print(f'Error {e}') sys.exit(1) except IOError as e: if con: con.rollback() print(f'Error {e}') sys.exit(1) finally: if con: con.close() # for each day for day in day_list: # get a pandas dataframe with the accel data from that day try: con = psycopg2.connect(database = self.db, user = self.user, password = self.password) cur = con.cursor() command = f"SELECT utc_time, utc_time_hr, mag FROM accel_mod WHERE patientID = '{self.patient_ID}' and utc_time_day = '{day}' ORDER BY utc_time_day;" df_one_day = pd.read_sql_query(command, con) con.commit() except psycopg2.DatabaseError as e: if con: con.rollback() print(f'Error {e}') sys.exit(1) except IOError as e: if con: con.rollback() print(f'Error {e}') sys.exit(1) finally: if con: con.close() # get a list of distinct hours from the dataframe hours_list = list(df_one_day.utc_time_hr.unique()) # initialize list that will contain rows to be writtent to the hourly_fft table fft_by_hour = [] # for each hour for hour in hours_list: # get all ordered mag column from all accel_mod rows that match that hour # first get all matching rows from df_one_day df_hour = df_one_day.loc[df_one_day['utc_time_hr'] == hour] # then sort on utc_time df_hour = df_hour.sort_values(by=['utc_time']) # then get just the mag column as a list mag_vect = df_hour['mag'].tolist() hr_len = len(mag_vect) if hr_len >= 100: # get energy of signal hr_len = len(mag_vect) energy = np.sum(np.square(mag_vect)) / hr_len; # get power spectral density of signal nfft = hr_len // 11 freqs, Pxx = signal.periodogram(mag_vect, fs = 10, nfft = nfft) # get average power in several frequency windows # due to the sampling frequency of 10 Hz, the FFT range is from 0 to 5 Hz # bins: 0-1, 1-2, 2-3, 3-4, 4-5 df_power = pd.DataFrame() df_power['freqs'] = freqs df_power['Pxx'] = Pxx power1 = np.array(df_power[df_power['freqs'].between(0, 1)]['Pxx']) pow_len = len(power1) power1 = np.sum(power1) / pow_len power2 = np.array(df_power[df_power['freqs'].between(1, 2)]['Pxx']) pow_len = len(power2) power2 = np.sum(power2) / pow_len power3 = np.array(df_power[df_power['freqs'].between(2, 3)]['Pxx']) pow_len = len(power3) power3 = np.sum(power3) / pow_len power4 = np.array(df_power[df_power['freqs'].between(3, 4)]['Pxx']) pow_len = len(power4) power4 = np.sum(power4) / pow_len power5 = np.array(df_power[df_power['freqs'].between(4, 5)]['Pxx']) pow_len = len(power5) power5 = np.sum(power5) / pow_len this_hour = [self.patient_ID, hour, energy, power1, power2, power3, power4, power5] fft_by_hour.append(this_hour) # write to SQL table try: con = psycopg2.connect(database = self.db, user = self.user, password = self.password) cur = con.cursor() for hour in fft_by_hour: command = f"INSERT INTO hourly_fft({column_names[0]}, {column_names[1]}, {column_names[2]}, {column_names[3]}, {column_names[4]}, {column_names[5]}, {column_names[6]}, {column_names[7]}) VALUES('{self.patient_ID}', '{hour[1]}', {hour[2]}, {hour[3]}, {hour[4]}, {hour[5]}, {hour[6]}, {hour[6]})" cur.execute(command) con.commit() except psycopg2.DatabaseError as e: if con: con.rollback() print(f'Error 
{e}') sys.exit(1) except IOError as e: if con: con.rollback() print(f'Error {e}') sys.exit(1) finally: if con: con.close() toc = time.time() print(toc-tic)
def period_detect(series, fs=1440, threshold=0.2, periodogram_candiate=8, max_error=0.005, segment_method="topdownsegment"): # dau vao df theo dinh dang cua twitter # fs: tan so lay mau (sample per day) if not isinstance(series, pd.Series): raise ValueError(("data must be a single data frame, " "list, or vector that holds numeric values.")) n = len(series) print(n) # mien tan so data_value_trans = series - series.median() f, Pxx_den = signal.periodogram(data_value_trans, fs, window=signal.get_window('hamming', n)) # print(len(f)) # print(f) # t = 0.02 * np.max(Pxx_den) # i_draw = [i for i in range(1, Pxx_den.size) if Pxx_den[i]>=t and f[i] < (n-1)/fs] # p_draw = [f[i] for i in i_draw] # y_draw = [Pxx_den[i] for i in i_draw] # plt.semilogy(p_draw, y_draw) # # plt.semilogy(f, Pxx_den) # plt.xlabel('frequency [Hz]') # plt.ylabel('PSD [V**2/Hz]') # plt.show() # chon nguong 40 % threshold = threshold * np.max(Pxx_den) index_period_candidate = [ i for i in range(1, Pxx_den.size - 1) if ((Pxx_den[i] > threshold) and (Pxx_den[i] > Pxx_den[i + 1]) and ( Pxx_den[i] > Pxx_den[i - 1])) ] period_candidate = [ f[i] for i in index_period_candidate if (f[i] < (n - 1) / fs) ] period_candidate_pxx = [ Pxx_den[i] for i in index_period_candidate if (f[i] < (n - 1) / fs) ] # plt.semilogy(period_candidate, period_candidate_pxx) # # plt.semilogy(f, Pxx_den) # plt.xlabel('frequency [Hz]') # plt.ylabel('PSD [V**2/Hz]') # plt.show() t = {'period': period_candidate, 'magnitude': period_candidate_pxx} period_candidate_point = pd.DataFrame(t) # chi lay 1 so luong candidate nhat dinh # ham nlargest tra lai thu tu theo manitude gian dan # do do ung vien dau tien la ung vin co Pxx lon nhat period_candidate_point = period_candidate_point.nlargest( periodogram_candiate, 'magnitude') lag = range(0, n - 1) autocorr = [ np.correlate(series, np.roll(series, -i))[0] / series.size for i in lag ] days = [l * 1.0 / fs for l in lag] ids = [ next(i for i in range(1, len(days) - 1) if p > days[i - 1] and p < days[i + 1]) for p in period_candidate ] ps = [days[i] for i in ids] psy = [autocorr[i] for i in ids] print(period_candidate) print(ps) print(psy) plt.scatter(ps, psy) plt.plot(days, autocorr) plt.xlabel('day') plt.ylabel('ACF') plt.show() # ACF_candidate = [autocorr[int(i*fs)] for i in period_candidate_point['period']] final_all_period = [] for period_temp in period_candidate_point['period']: startpoint = (int)(period_temp * fs) temp = autocorr[startpoint] begin_frame = np.max([(startpoint - fs), 0]) end_frame = np.min([startpoint + fs, len(autocorr)]) max = np.max(autocorr[begin_frame:end_frame]) min = np.min(autocorr[begin_frame:end_frame]) tb = (max + min) / 2 if (max - min > 0): autocorr_normalize = (np.array(autocorr) - tb) / (max - min) else: return final_all_period segments = [] try: if (segment_method == "slidingwindowsegment"): segments = segment.slidingwindowsegment( autocorr_normalize[begin_frame:end_frame], fit.regression, fit.sumsquared_error, max_error) if (segment_method == "topdownsegment"): segments = segment.topdownsegment( autocorr_normalize[begin_frame:end_frame], fit.regression, fit.sumsquared_error, max_error) if (segment_method == "bottomupsegment"): segments = segment.bottomupsegment( autocorr_normalize[begin_frame:end_frame], fit.regression, fit.sumsquared_error, max_error) except: pass if len(segments) < 3: continue # check xem co la hill ko # diem start point la 200 (trong khoang moi 401 diem dang xet) # tim doan seg cua diem nay # seg_index = 0 for i in range(0, len(segments)): if startpoint - 
begin_frame < segments[i][2]: seg_index = i break if ((seg_index < 2) or (seg_index > len(segments) - 2)): continue dh_trai = (segments[seg_index][3] - segments[seg_index][1]) - ( segments[seg_index - 1][3] - segments[seg_index - 1][1]) dh_phai = (segments[seg_index + 1][3] - segments[seg_index + 1][1]) - ( segments[seg_index][3] - segments[seg_index][1]) if ((dh_phai < 0) and (dh_trai < 0)): # diem nam tren hill tien hanh tim closest peak while (segments[seg_index][3] > segments[seg_index][1]): # di tu trai sang phai # khi nao ma dao ham con duong thi di tu trai sang phai seg_index = seg_index + 1 if (seg_index > len(segments) - 2): break while (segments[seg_index][3] < segments[seg_index][1]): # khi nao dao ham con am thi di tu phai sang trai seg_index = seg_index - 1 if ((seg_index < 2)): break if ((seg_index >= 2) and (seg_index <= len(segments) - 2)): final_period = segments[seg_index][2] final_all_period.append(final_period + begin_frame) return [x * 1.0 / fs for x in final_all_period]
def calc_fd_measures(method='welch', square_spectrum=True, measures={}, working_data={}): '''calculates the frequency-domain measurements. Function that calculates the frequency-domain measurements for HeartPy. Parameters ---------- method : str method used to compute the spectrogram of the heart rate. available methods: fft, periodogram, and welch default : welch square_spectrum : bool whether to square the power spectrum returned. default : true measures : dict dictionary object used by heartpy to store computed measures. Will be created if not passed to function. working_data : dict dictionary object that contains all heartpy's working data (temp) objects. will be created if not passed to function Returns ------- working_data : dict dictionary object that contains all heartpy's working data (temp) objects. measures : dict dictionary object used by heartpy to store computed measures. Examples -------- Normally this function is called during the process pipeline of HeartPy. It can of course also be used separately. Let's load an example and get a list of peak-peak intervals >>> import heartpy as hp >>> data, timer = hp.load_exampledata(2) >>> sample_rate = hp.get_samplerate_datetime(timer, timeformat='%Y-%m-%d %H:%M:%S.%f') >>> wd, m = hp.process(data, sample_rate) wd now contains a list of peak-peak intervals that has been cleaned of outliers ('RR_list_cor'). Calling the function then is easy >>> wd, m = calc_fd_measures(method = 'periodogram', measures = m, working_data = wd) >>> print('%.3f' %m['lf/hf']) 4.964 Available methods are 'fft', 'welch' and 'periodogram'. To set another method, do: >>> wd, m = calc_fd_measures(method = 'fft', measures = m, working_data = wd) >>> print('%.3f' %m['lf/hf']) 4.964 If there are no valid peak-peak intervals specified, returned measures are NaN: >>> wd['RR_list_cor'] = [] >>> wd, m = calc_fd_measures(working_data = wd) >>> np.isnan(m['lf/hf']) True If there are rr-intervals but not enough to reliably compute frequency measures, a warning is raised: -------------- RuntimeWarning: Short signal. ---------Warning:--------- too few peak-peak intervals for (reliable) frequency domain measure computation, frequency output measures are still computed but treat them with caution! HF is usually computed over a minimum of 1 minute of good signal. LF is usually computed over a minimum of 2 minutes of good signal. The LF/HF ratio is usually computed over minimum 24 hours, although an absolute minimum of 5 min has also been suggested. For more info see: \nShaffer, F., Ginsberg, J.P. (2017). An Overview of Heart Rate Variability Metrics and Norms. Task Force of Pacing and Electrophysiology (1996), Heart Rate Variability in: European Heart Journal, vol.17, issue 3, pp354-381 This warning will not repeat' -------------- ''' rr_list = working_data['RR_list_cor'] if len(rr_list) <= 1: working_data['frq'] = np.nan working_data['psd'] = np.nan measures['lf'] = np.nan measures['hf'] = np.nan measures['lf/hf'] = np.nan return working_data, measures elif np.sum(rr_list) <= 300000: # pragma: no cover #warn if signal is short msg = ''.join(( 'Short signal.\n', '\n---------Warning:---------\n', 'too few peak-peak intervals for (reliable) frequency domain measure computation, ', 'frequency output measures are still computed but treat them with caution!\n\n', 'HF is usually computed over a minimum of 1 minute of good signal. 
', 'LF is usually computed over a minimum of 2 minutes of good signal.', 'The LF/HF ratio is usually computed over minimum 24 hours, although an ', 'absolute minimum of 5 min has also been suggested.\n\n', 'For more info see: \nShaffer, F., Ginsberg, J.P. (2017), ', 'An Overview of Heart Rate Variability Metrics and Norms.\n\n', 'Task Force of Pacing and Electrophysiology (1996), Heart Rate Variability, ', 'in: European Heart Journal, vol.17, issue 3, pp354-381' '\n\nThis warning will not repeat')) warnings.warn(msg, UserWarning) rr_x = [] pointer = 0 for x in rr_list: pointer += x rr_x.append(pointer) rr_x_new = np.linspace(rr_x[0], rr_x[-1], rr_x[-1]) interpolated_func = UnivariateSpline(rr_x, rr_list, k=3) if method == 'fft': datalen = len(rr_x_new) frq = np.fft.fftfreq(datalen, d=((1 / 1000.0))) frq = frq[range(int(datalen / 2))] Y = np.fft.fft(interpolated_func(rr_x_new)) / datalen Y = Y[range(int(datalen / 2))] psd = np.power(Y, 2) elif method == 'periodogram': frq, psd = periodogram(interpolated_func(rr_x_new), fs=1000.0) elif method == 'welch': frq, psd = welch(interpolated_func(rr_x_new), fs=1000.0, nperseg=len(rr_x_new) - 1) else: raise ValueError( "specified method incorrect, use 'fft', 'periodogram' or 'welch'") working_data['frq'] = frq working_data['psd'] = psd measures['lf'] = np.trapz(abs(psd[(frq >= 0.04) & (frq <= 0.15)])) measures['hf'] = np.trapz(abs(psd[(frq >= 0.16) & (frq <= 0.5)])) measures['lf/hf'] = measures['lf'] / measures['hf'] working_data['interp_rr_function'] = interpolated_func working_data['interp_rr_linspace'] = (rr_x[0], rr_x[-1], rr_x[-1]) return working_data, measures
# plot(R7.t,R7.i+40,'b.',label='SI')
# plot(R8.t,R8.i+0,'c.',label='Fix')
# xlim(0,runtime/second)
# legend(loc='upper left')

min_t = int(50 * ms * 100000 * Hz)
LFP_V1 = 1 / 20 * sum(V1.V, axis=0)[min_t:]
LFP_V2 = 1 / 20 * sum(V2.V, axis=0)[min_t:]
LFP_V3 = 1 / 20 * sum(V3.V, axis=0)[min_t:]
LFP_V4 = 1 / 20 * sum(V4.V, axis=0)[min_t:]
LFP_V5 = 1 / 20 * sum(V5.V, axis=0)[min_t:]
# LFP_V6 = 1/20*sum(V6.V,axis=0)[min_t:]
# LFP_V7 = 1/20*sum(V7.V,axis=0)[min_t:]

f, Spectrum_LFP_V1 = signal.periodogram(LFP_V1, 100000, 'flattop', scaling='spectrum')
f, Spectrum_LFP_V2 = signal.periodogram(LFP_V2, 100000, 'flattop', scaling='spectrum')
f, Spectrum_LFP_V3 = signal.periodogram(LFP_V3, 100000, 'flattop', scaling='spectrum')
f, Spectrum_LFP_V4 = signal.periodogram(LFP_V4, 100000, 'flattop', scaling='spectrum')
f, Spectrum_LFP_V5 = signal.periodogram(LFP_V5, 100000,
def run_one_simulation(simu, path, index_var): # print(simu,len(simu)) start_scope() close('all') runtime = 1 * second Vrev_inp = 0 * mV taurinp = 0.1 * ms taudinp = 0.5 * ms tauinp = taudinp Vhigh = 0 * mV Vlow = -80 * mV ginp_IB = 0 * msiemens * cm**-2 ginp_SI = 0 * msiemens * cm**-2 ginp = 0 * msiemens * cm**-2 f_mdPul = 20 * Hz NN = 1 #multiplicative factor on the number of neurons N_RS, N_FS, N_SI, N_IB = NN * 80, NN * 20, NN * 20, NN * 20 #Number of neurons of RE, TC, and HTC type syn_cond, J, thal, theta_phase, index = simu print('Simulation ' + str(index)) if theta_phase == 'bad': input_beta2_IB = False input_beta2_RS = False input_beta2_FS_SI = True input_thalamus_gran = True gFS = 0 * msiemens * cm**-2 ginp_SI = 0 * msiemens * cm**-2 ginpSIdeep = 0 * msiemens * cm**-2 thal_cond = 2 * msiemens * cm**-2 kainate = 'low' input_mixed = False if theta_phase == 'good': # input_beta2_IB=True input_beta2_IB = False ginp_IB = 500 * msiemens * cm**-2 ginpSIdeep = 500 * msiemens * cm**-2 input_beta2_RS = False input_beta2_FS_SI = False input_thalamus_gran = True thal_cond = thal kainate = 'low' input_mixed = False if theta_phase == 'mixed': input_mixed = True ginp_IB = 500 * msiemens * cm**-2 ginpSIdeep = 500 * msiemens * cm**-2 input_beta2_IB = False input_beta2_RS = False input_beta2_RS = False input_beta2_FS_SI = False input_thalamus_gran = False kainate = 'low' net = Network(collect()) print('Network setup') all_neurons, all_synapses, all_gap_junctions, all_monitors = make_full_network( syn_cond, J, thal, f_mdPul, theta_phase) V1, V2, V3, R1, R2, R3, I1, I2, I3, V4, R4, I4s, I4a, I4ad, I4bd, R5, R6, R7, V5, V6, V7, inpmon, inpIBmon = all_monitors net.add(all_neurons) net.add(all_synapses) net.add(all_gap_junctions) net.add(all_monitors) print('Compiling with cython') prefs.codegen.target = 'cython' #cython=faster, numpy = default python net.run(runtime, report='text', report_period=300 * second) figure() plot(R1.t, R1.i + 140, 'r.', label='RS cells') plot(R2.t, R2.i + 120, 'm.', label='FS cells') plot(R3.t, R3.i + 100, 'y.', label='SI cells') plot(R5.t, R5.i + 70, 'g.', label='Granular RS') plot(R6.t, R6.i + 50, 'c.', label='Granular FS') plot(R4.t, R4.i + 20, 'b.', label='IB cells') plot(R7.t, R7.i, 'k.', label='Deep SI') xlim(0, runtime / second) legend(loc='upper left') figure() plot(inpmon.t, inpmon.Iinp1[0]) min_t = int(50 * ms * 100000 * Hz) LFP_V_RS = 1 / N_RS * sum(V1.V, axis=0)[min_t:] LFP_V_FS = 1 / N_FS * sum(V2.V, axis=0)[min_t:] LFP_V_SI = 1 / N_SI * sum(V3.V, axis=0)[min_t:] LFP_V_IB = 1 / N_IB * sum(V4.V, axis=0)[min_t:] LFP_V_RSg = 1 / N_FS * sum(V5.V, axis=0)[min_t:] LFP_V_FSg = 1 / N_FS * sum(V6.V, axis=0)[min_t:] LFP_V_SId = 1 / N_SI * sum(V7.V, axis=0)[min_t:] f, Spectrum_LFP_V_RS = signal.periodogram(LFP_V_RS, 100000, 'flattop', scaling='spectrum') f, Spectrum_LFP_V_FS = signal.periodogram(LFP_V_FS, 100000, 'flattop', scaling='spectrum') f, Spectrum_LFP_V_SI = signal.periodogram(LFP_V_SI, 100000, 'flattop', scaling='spectrum') f, Spectrum_LFP_V_IB = signal.periodogram(LFP_V_IB, 100000, 'flattop', scaling='spectrum') f, Spectrum_LFP_V_RSg = signal.periodogram(LFP_V_RSg, 100000, 'flattop', scaling='spectrum') f, Spectrum_LFP_V_FSg = signal.periodogram(LFP_V_FSg, 100000, 'flattop', scaling='spectrum') f, Spectrum_LFP_V_SId = signal.periodogram(LFP_V_SId, 100000, 'flattop', scaling='spectrum') figure(figsize=(10, 8)) subplot(421) plot(f, Spectrum_LFP_V_RS) ylabel('Spectrum') yticks([], []) xlim(0, 100) title('RS cell') subplot(422) plot(f, Spectrum_LFP_V_FS) yticks([], 
[]) xlim(0, 100) title('FS cell') subplot(423) plot(f, Spectrum_LFP_V_SI) ylabel('Spectrum') yticks([], []) xlim(0, 100) title('SI cell') subplot(425) plot(f, Spectrum_LFP_V_RSg) ylabel('Spectrum') yticks([], []) xlim(0, 100) title('gran RS cell') subplot(426) plot(f, Spectrum_LFP_V_FSg) yticks([], []) xlim(0, 100) title('gran FS cell') subplot(427) plot(f, Spectrum_LFP_V_IB) xlabel('Frequency (Hz)') ylabel('Spectrum') yticks([], []) xlim(0, 100) title('IB cell') subplot(428) plot(f, Spectrum_LFP_V_SId) yticks([], []) xlim(0, 100) xlabel('Frequency (Hz)') title('deep SI cell') tight_layout() # min_t=int(50*ms*100000*Hz) # LFP_V_RS=1/N_RS*sum(V1.V,axis=0)[min_t:] # LFP_V_FS=1/N_FS*sum(V2.V,axis=0)[min_t:] # LFP_V_SI=1/N_SI*sum(V3.V,axis=0)[min_t:] # LFP_V_IB=1/N_IB*sum(V4.V,axis=0)[min_t:] # # f,Spectrum_LFP_V_RS=signal.periodogram(LFP_V_RS, 100000,'flattop', scaling='spectrum') # f,Spectrum_LFP_V_FS=signal.periodogram(LFP_V_FS, 100000,'flattop', scaling='spectrum') # f,Spectrum_LFP_V_SI=signal.periodogram(LFP_V_SI, 100000,'flattop', scaling='spectrum') # f,Spectrum_LFP_V_IB=signal.periodogram(LFP_V_IB, 100000,'flattop', scaling='spectrum') # # figure() # subplot(421) # plot((V1.t/second)[min_t:],LFP_V_RS) # ylabel('LFP') # title('RS cell') # subplot(423) # plot((V1.t/second)[min_t:],LFP_V_FS) # ylabel('LFP') # title('FS cell') # subplot(425) # plot((V1.t/second)[min_t:],LFP_V_SI) # ylabel('LFP') # title('SI cell') # subplot(427) # plot((V1.t/second)[min_t:],LFP_V_IB) # xlabel('Time (s)') # ylabel('LFP') # title('IB cell') # # subplot(422) # plot(f,Spectrum_LFP_V_RS) # ylabel('Spectrum') # yticks([],[]) # xlim(0,100) # title('RS cell') # subplot(424) # plot(f,Spectrum_LFP_V_FS) # ylabel('Spectrum') # yticks([],[]) # xlim(0,100) # title('FS cell') # subplot(426) # plot(f,Spectrum_LFP_V_SI) # ylabel('Spectrum') # yticks([],[]) # xlim(0,100) # title('SI cell') # subplot(428) # plot(f,Spectrum_LFP_V_IB) # xlabel('Frequency (Hz)') # ylabel('Spectrum') # yticks([],[]) # xlim(0,100) # title('IB cell') ##save figures new_path = path + "/results_" + str(index) os.mkdir(new_path) for n in get_fignums(): current_fig = figure(n) current_fig.savefig(new_path + '/figure' + str(n) + '.png')
def compute_first_order(permuted_outputs, M):
    _, Pxx = periodogram(permuted_outputs)
    V = np.sum(Pxx[1:])
    D1 = np.sum(Pxx[1: M + 1])
    return D1 / V
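# A toy check of this estimator (not from the original code): as in RBD-FAST, the factor of
# interest is arranged to complete exactly one cycle along the sample index, so its first-order
# sensitivity index is the share of spectral power in the first M harmonics.
import numpy as np
from scipy.signal import periodogram

N = 1001
s = np.linspace(-np.pi, np.pi, N)
x1 = 0.5 + np.arcsin(np.sin(s)) / np.pi      # factor of interest, one cycle over the index
x2 = np.random.rand(N)                       # independent second factor
y = 4.0 * x1 + 0.5 * x2                      # toy model output, already ordered along x1's curve

print(compute_first_order(y, M=10))          # should be close to x1's variance share (~0.98 here)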
def plot_psd(dpl, *, fmin=0, fmax=None, tmin=None, tmax=None, layer='agg', ax=None, show=True): """Plot power spectral density (PSD) of dipole time course Applies `~scipy.signal.periodogram` from SciPy with ``window='hamming'``. Note that no spectral averaging is applied across time, as most ``hnn_core`` simulations are short-duration. However, passing a list of `Dipole` instances will plot their average (Hamming-windowed) power, which resembles the `Welch`-method applied over time. Parameters ---------- dpl : instance of Dipole | list of Dipole instances The Dipole object. fmin : float Minimum frequency to plot (in Hz). Default: 0 Hz fmax : float Maximum frequency to plot (in Hz). Default: None (plot up to Nyquist) tmin : float or None Start time of data to include (in ms). If None, use entire simulation. tmax : float or None End time of data to include (in ms). If None, use entire simulation. layer : str, default 'agg' The layer to plot. Can be one of 'agg', 'L2', and 'L5' ax : instance of matplotlib figure | None The matplotlib axis. show : bool If True, show the figure Returns ------- fig : instance of matplotlib Figure The matplotlib figure handle. """ import matplotlib.pyplot as plt from scipy.signal import periodogram from .dipole import Dipole if ax is None: _, ax = plt.subplots(1, 1, constrained_layout=True) if isinstance(dpl, Dipole): dpl = [dpl] scale_applied = dpl[0].scale_applied sfreq = dpl[0].sfreq trial_power = [] for dpl_trial in dpl: if dpl_trial.scale_applied != scale_applied: raise RuntimeError('All dipoles must be scaled equally!') if dpl_trial.sfreq != sfreq: raise RuntimeError('All dipoles must be sampled equally!') data, _ = _get_plot_data_trange(dpl_trial.times, dpl_trial.data[layer], tmin, tmax) freqs, Pxx = periodogram(data, sfreq, window='hamming', nfft=len(data)) trial_power.append(Pxx) ax.plot(freqs, np.mean(np.array(Pxx, ndmin=2), axis=0)) if fmax is not None: ax.set_xlim((fmin, fmax)) ax.ticklabel_format(axis='both', scilimits=(-2, 3)) ax.set_xlabel('Frequency (Hz)') if scale_applied == 1: ylabel = 'Power spectral density\n(nAm' + r'$^2 \ Hz^{-1}$)' else: ylabel = 'Power spectral density\n' +\ r'([nAm$\times$ {:.0f}]'.format(scale_applied) +\ r'$^2 \ Hz^{-1}$)' ax.set_ylabel(ylabel, multialignment='center') plt_show(show) return ax.get_figure()
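# A hedged usage sketch for plot_psd (not from the original code): it assumes dipoles simulated
# with hnn_core's documented jones_2009_model()/simulate_dipole() workflow; the exact call
# signature may differ between hnn_core versions.
from hnn_core import jones_2009_model, simulate_dipole

net = jones_2009_model()
dpls = simulate_dipole(net, tstop=170.0, n_trials=2)   # two short trials (assumed signature)

# Hamming-windowed power of the aggregate dipole, plotted up to 50 Hz
plot_psd(dpls, fmin=0.0, fmax=50.0, layer='agg')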
def create_periodogram(freq):
    return signal.periodogram(freq, fs=SAMPLE_RATE)
def plot_results(pilot_dir, subj, channel, alpha_band=(9, 14), theta_band=(3, 6), drop_channels=None, dc=False, reject_alpha=True, normalize_by='opened'): drop_channels = drop_channels or [] cm = get_colors() fg = plt.figure(figsize=(30, 6)) for j_s, experiment in enumerate(subj): with h5py.File('{}\\{}\\{}'.format(pilot_dir, experiment, 'experiment_data.h5')) as f: rejections, top_alpha, top_ica = load_rejections( f, reject_alpha=reject_alpha) fs, channels, p_names = get_info(f, drop_channels) ch = channels.index(channel) #plt.plot(fft_filter(f['protocol6/raw_data'][:, ch], fs, band=(3, 35))) #plt.plot(fft_filter(np.dot(f['protocol6/raw_data'], rejections)[:, ch], fs, band=(3, 35))) #plt.show() #from scipy.signal import welch #plt.plot(*welch(f['protocol1/raw_data'][:60*500//2, channels.index('C3')], fs, nperseg=1000)) #plt.plot(*welch(f['protocol1/raw_data'][60*500//2:, channels.index('C3')], fs, nperseg=1000)) #plt.plot(*welch(f['protocol2/raw_data'][:30*500//2, channels.index('C3')], fs, nperseg=1000)) #plt.plot(*welch(f['protocol2/raw_data'][30*500//2:, channels.index('C3')], fs, nperseg=1000)) #plt.legend(['Close', 'Open', 'Left', 'Right']) #plt.show() # collect powers powers = OrderedDict() raw = OrderedDict() alpha = OrderedDict() pow_theta = [] for j, name in enumerate(p_names): pow, alpha_x, x = get_protocol_power(f, j, fs, rejections, ch, alpha_band, dc=dc) if 'FB' in name: pow_theta.append( get_protocol_power(f, j, fs, rejections, ch, theta_band, dc=dc)[0].mean()) powers = add_data(powers, name, pow, j) raw = add_data(raw, name, x, j) alpha = add_data(alpha, name, alpha_x, j) # plot rejections n_tops = top_ica.shape[1] + top_alpha.shape[1] for j_t in range(top_ica.shape[1]): ax = fg.add_subplot( 4, n_tops * len(subj), n_tops * len(subj) * 3 + n_tops * j_s + j_t + 1) ax.set_xlabel('ICA{}'.format(j_t + 1)) labels, fs = get_lsl_info_from_xml(f['stream_info.xml'][0]) channels = [ label for label in labels if label not in drop_channels ] pos = ch_names_to_2d_pos(channels) plot_topomap(data=top_ica[:, j_t], pos=pos, axes=ax, show=False) for j_t in range(top_alpha.shape[1]): ax = fg.add_subplot( 4, n_tops * len(subj), n_tops * len(subj) * 3 + n_tops * j_s + j_t + 1 + top_ica.shape[1]) ax.set_xlabel('CSP{}'.format(j_t + 1)) labels, fs = get_lsl_info_from_xml(f['stream_info.xml'][0]) channels = [ label for label in labels if label not in drop_channels ] pos = ch_names_to_2d_pos(channels) plot_topomap(data=top_alpha[:, j_t], pos=pos, axes=ax, show=False) # plot powers if normalize_by == 'opened': norm = powers['1. Opened'].mean() elif normalize_by == 'beta': norm = np.mean(pow_theta) else: print('WARNING: norm = 1') print('norm', norm) ax1 = fg.add_subplot(3, len(subj), j_s + 1) ax = fg.add_subplot(3, len(subj), j_s + len(subj) + 1) t = 0 for j_p, ((name, pow), (name, x)) in enumerate(zip(powers.items(), raw.items())): if name == '2228. 
FB': from scipy.signal import periodogram fff = plt.figure() fff.gca().plot(*periodogram(x, fs, nfft=fs * 3), c=cm[name.split()[1]]) plt.xlim(0, 80) plt.ylim(0, 3e-11) plt.show() print(name) time = np.arange(t, t + len(x)) / fs color = cm[''.join( [i for i in name.split()[1] if not i.isdigit()])] ax1.plot(time, fft_filter(x, fs, (2, 45)), c=color, alpha=0.4) ax1.plot(time, alpha[name], c=color) t += len(x) ax.plot([j_p], [pow.mean() / norm], 'o', c=color, markersize=10) ax.errorbar([j_p], [pow.mean() / norm], yerr=pow.std() / norm, c=color, ecolor=color) fb_x = np.hstack([[j] * len(pows) for j, (key, pows) in enumerate(powers.items()) if 'FB' in key]) fb_y = np.hstack( [pows for key, pows in powers.items() if 'FB' in key]) / norm sns.regplot(x=fb_x, y=fb_y, ax=ax, color=cm['FB'], scatter=False, truncate=True) ax1.set_xlim(0, t / fs) ax1.set_ylim(-40, 40) plt.setp(ax.xaxis.get_majorticklabels(), rotation=70) ax.set_xticks(range(len(powers))) ax.set_xticklabels(powers.keys()) ax.set_ylim(0, 3) ax.set_xlim(-1, len(powers)) ax1.set_title('Day {}'.format(j_s + 1)) return fg
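# Small sketch of the zero-padded periodogram used in the FB-protocol plot above:
# with nfft = 3 * fs the frequency grid is spaced at fs / nfft = 1/3 Hz. The data
# and the 500 Hz EEG sampling rate here are assumptions, not taken from the study.
import numpy as np
from scipy.signal import periodogram

fs = 500
x = np.random.default_rng(1).normal(size=2 * fs)   # 2 s of fake EEG
f, Pxx = periodogram(x, fs, nfft=fs * 3)
print(f[1] - f[0])                                  # grid spacing ~= 0.333 Hz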
def get_power_spectra(kernel, sigma, plot_flag=PLOT_FLAG): r, d, nr, nd = get_all_data() # Generate periodogram vals for music & noise f, pxx = signal.periodogram(d, r, return_onesided=False) nf, npxx = signal.periodogram(nd, nr, return_onesided=False) # Do convolution: convolved = signal.fftconvolve(pxx, kernel, 'same') nconvolved = signal.fftconvolve(npxx, kernel, 'same') if plot_flag: # Plot all power spectra # First, switch indices so that they make sense again (FFTs in numpy switch them up) f_ = fftshift(f) pxx_ = fftshift(pxx) npxx_ = fftshift(npxx) convolved_ = fftshift(convolved) nconvolved_ = fftshift(nconvolved) fig, axs = plt.subplots(2, 2, sharex=True) fig.suptitle("Full Power Spectra, Convolved with Gaussian (sigma = " + str(sigma) + ")") axs[0, 0].semilogy(f_, pxx_) axs[0, 0].set_title("Signal Periodogram") axs[0, 1].semilogy(f_, npxx_) axs[0, 1].set_title("Noise Periodogram") axs[1, 0].semilogy(f_, convolved_) axs[1, 0].set_title("Signal Periodogram, convolved") axs[1, 1].semilogy(f_, nconvolved_) axs[1, 1].set_title("Noise Periodogram, convolved") for a in axs.flat: a.set_ylim([1e-6, 1e10]) a.set(xlabel='Frequency (Hz)', ylabel='PSD (V**2/Hz)') for a in axs.flat: a.label_outer() fig.tight_layout() plt.show() # Plot positive-valued spectra up to Nyquist Frequency fig, axs = plt.subplots(2, 2) fig.suptitle( "Positive Power Spectra, Convolved with Gaussian (sigma = " + str(sigma) + "), Up to f_Nyquist") axs[0, 0].semilogy(f_, pxx_) axs[0, 0].set_title("Signal Periodogram") axs[0, 1].semilogy(f_, npxx_) axs[0, 1].set_title("Noise Periodogram") axs[1, 0].semilogy(f_, convolved_) axs[1, 0].set_title("Signal Periodogram, convolved") axs[1, 1].semilogy(f_, nconvolved_) axs[1, 1].set_title("Noise Periodogram, convolved") for a in axs.flatten(): a.set_ylim([1e-6, 1e10]) a.set_xlim([0., r / 2]) a.set(xlabel='Frequency (Hz)', ylabel='PSD (V**2/Hz)') for a in axs.flat: a.label_outer() fig.tight_layout() plt.show() return f, pxx, nf, npxx, convolved, nconvolved
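# Sketch of the two-sided-spectrum handling in get_power_spectra above: with
# return_onesided=False the frequency axis comes back in FFT order, so fftshift is
# needed before plotting, and smoothing is done by convolving the PSD with a kernel
# (here a hypothetical Gaussian built from scipy, standing in for the function's
# `kernel` argument).
import numpy as np
from numpy.fft import fftshift
from scipy import signal

fs = 8000
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 1000 * t) + 0.1 * np.random.default_rng(2).normal(size=t.size)

f, pxx = signal.periodogram(x, fs, return_onesided=False)
kernel = signal.windows.gaussian(51, std=5)
kernel /= kernel.sum()
smoothed = signal.fftconvolve(pxx, kernel, 'same')

f_, pxx_, smoothed_ = fftshift(f), fftshift(pxx), fftshift(smoothed)
# f_ now runs from -fs/2 to +fs/2 and is suitable for semilogy plotting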
def feature_ttp(series, window, step):
    """Total Power"""
    windows_strided, indexes = biolab_utilities.moving_window_stride(
        series.values, window, step)
    freq, power = signal.periodogram(windows_strided, 5120)
    return pd.Series(data=np.sum(power, axis=1), index=series.index[indexes])
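# Sketch of the "total power" feature above: summing the one-sided periodogram and
# multiplying by the bin width approximates the mean-square of the window (Parseval).
# The 5120 Hz rate matches the value hard-coded in feature_ttp; the data are fake.
import numpy as np
from scipy import signal

fs = 5120
x = np.random.default_rng(3).normal(size=fs)              # 1 s of fake EMG
freq, power = signal.periodogram(x, fs)
total_power = np.sum(power)                                # the feature as computed above
print(total_power * (freq[1] - freq[0]), np.mean(x**2))    # roughly equal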
def update_brainplot_time(threshold, contrast, direction, sslice, hoverData, voxel_disp, datatype): if datatype == 'time': if grouplevel: xtitle = 'Subjects' else: xtitle = 'Time' ytitle = 'Activation (contrast estimate)' else: xtitle = 'Frequency (Hz)' ytitle = 'Power' with open("current_contrast.txt", "r") as text_file: current_contrast = text_file.readlines()[0] if contrast != current_contrast: nonlocal global_contrast, global_func, global_design global_func, global_contrast = load_data(contrast, load_func=True) global_design = read_design_file(op.join(data, contrast)) if cfg['standardize']: global_func = standardize(global_func) with open("current_contrast.txt", "w") as text_file: text_file.write(contrast) if hoverData is None: if grouplevel: x, y = 40, 40 else: x, y = 20, 20 else: x = hoverData['points'][0]['x'] y = hoverData['points'][0]['y'] img = index_by_slice(direction, sslice, global_func) signal = img[x, y, :].ravel() if np.all(np.isnan(signal)): signal = np.zeros(signal.size) if 'model' in voxel_disp and not np.all(signal == 0): betas = np.linalg.lstsq(global_design, signal)[0] signal_hat = betas.dot(global_design.T) fitted_model = go.Scatter(x=np.arange(1, global_func.shape[-1] + 1), y=signal_hat, name='Model fit') if grouplevel: datatype = 'time' plottitle = 'Activation across subjects' bcolors = [ 'rgb(225,20,20)' if sig > 0 else 'rgb(35,53,216)' for sig in signal ] bdata = go.Bar(x=np.arange(1, global_func.shape[-1] + 1), y=signal, name='Activity', marker=dict(color=bcolors, line=dict(color='rgb(211,211,211)', width=0.2))) else: plottitle = 'Activation across time' bdata = go.Scatter(x=np.arange(global_func.shape[-1]), y=signal, name='Activity') layout = go.Layout( autosize=True, margin={ 't': 50, 'l': 50, 'r': 5 }, plot_bgcolor=colors['background'], paper_bgcolor=colors['background'], font={'color': colors['text']}, xaxis=dict( #autorange=False, showgrid=True, zeroline=True, showline=True, autotick=True, #ticks='', showticklabels=True, title=xtitle), yaxis=dict( autorange=True, showgrid=True, zeroline=True, showline=True, autotick=True, #ticks='', showticklabels=True, title=ytitle), title=plottitle) if 'model' in voxel_disp and not np.all(signal == 0): figure = {'data': [bdata, fitted_model], 'layout': layout} else: figure = {'data': [bdata], 'layout': layout} if datatype == 'freq': for i, element in enumerate(figure['data']): from scipy.signal import periodogram dat = element['y'] freq, power = periodogram(dat, 0.5, return_onesided=True) element['y'] = power element['x'] = freq figure['data'][i] = element return figure
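# Sketch of the 'freq' display mode above: each plotted time course is replaced by
# its one-sided periodogram. The 0.5 Hz sampling rate passed there would correspond
# to an assumed fMRI repetition time of 2 s; the voxel time course here is fake.
import numpy as np
from scipy.signal import periodogram

tr = 2.0                                            # assumed repetition time, seconds
bold = np.random.default_rng(8).normal(size=200)    # fake voxel time course
freq, power = periodogram(bold, 1.0 / tr, return_onesided=True)
# freq now runs from 0 up to the Nyquist rate of 0.25 Hz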
def get_peak_freq(x):
    f, Pxx = scisig.periodogram(x, fs=8)
    # map each spectral amplitude to its frequency and return the frequency of the
    # largest amplitude (duplicate amplitude values keep only the last frequency seen)
    psd_dict = {amp: freq for amp, freq in zip(Pxx, f)}
    peak_freq = psd_dict[max(psd_dict.keys())]
    return peak_freq
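# Quick check of get_peak_freq above on a synthetic signal: a 1 Hz sine sampled at
# the 8 Hz rate assumed by the function should come back with a peak near 1 Hz
# (assumes get_peak_freq and its scisig import are in scope).
import numpy as np

t = np.arange(0, 60, 1 / 8)              # 60 s at 8 Hz
x = np.sin(2 * np.pi * 1.0 * t)
print(get_peak_freq(x))                   # ~= 1.0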
def med_spec(tr, pp, Fs_old): global WIND, NFFT, idx #%% # tr = irisFetch.Traces(netw,station,'*',chan, ... # datestr(t(i), 'yyyy-mm-dd HH:MM:SS'), datestr(t(i)+coarse_duration/86400, 'yyyy-mm-dd HH:MM:SS')); #% [datestr([tr.startTime]), datestr([tr.endTime])] # if length(tr)~=1 # continue # end Fs = int(tr.stats.sampling_rate) # if rem(Fs,1) ~= 0 % Check to see if the sample rate is an integer # fprintf(1,'\n'); # disp('Sampling frequency (Fs) is not an integer ') # disp('Progress: '); # continue # end # if ~strcmp(tr.sensitivityUnits, 'M/S') # error('Sensitivity units are not in m/s. Modify function appropriately and re-run.') # end # data = tr.data/tr.sensitivity; data = tr.data# * 10**-9 # coarse_dur_retrieved(i) = tr.endTime - tr.startTime; # end #%% # START PROCESSING THE DATA #if Fs*pp['course_duration'] > length(data)+Fs % Skip incomplete hours # continue #end if Fs != Fs_old: # % Only recalculate these metrics when necessary. Most of the time they can stay the same. print('>>> Recalulating idx and WIND <<<') L = int(Fs * pp['fine_duration']) # Length of fine_window data vector NFFT = int(2**np.ceil(np.log2(L))) # Next power of 2 from length of fine data # f = np.linspace(0, Fs/2, num=NFFT/2+1) id1 = np.arange(0, L) # [0:L-1]'; start_offset = np.arange(0, Fs*pp['coarse_duration']-L, L*(1-pp['fine_overlap'])) start_offset = start_offset.astype('int') idx = id1 + start_offset[:, None] # idx = bsxfun(@plus, id1, start_offset); % creates a matrix of indices, which will be used to turn the coarse-window data into many snippets of fine-window data. #% cos_taper = tukeywin(L, 0.2); # wind = np.hanning(L); # WIND = np.tile(wind, (len(start_offset), 1) ) Fs_old = Fs DATA = data[idx] # % turn the coarse-window data into many fine-window snippets of length L # print('DATA complete') # These next three lines are all unnecessary, because they're taken care of in the call to signal.periodogram # DATA = signal.detrend(DATA, axis=0, type='linear') # DATA = detrend(DATA) #; % demeans AND detrends DATA with this one command # DATA = WIND * DATA #; % Apply window to DATA, prior to FFT, to reduce sidelobe leakage. # print('DATA ready for periodogram') freqs, Pxx = signal.periodogram(DATA, fs=Fs, window='hanning', nfft=NFFT, detrend='linear', return_onesided=True, scaling='density', axis=1) # With simple sin wave, TCB confirms Mar 1, 2018 # that np.sum(DATA**2)/L = np.sum(Pxx) * delta_freq, for unwindowed data # Tim further confirms that when the data is windowed in the signal.periodogram # call, that the powers are re-scaled so that Parseval's theorem holds. # but that if the data is hanning windowed first, then sig.per has no # idea (of course), and the powers are reduced by a factor of 2.67 # On Mar 1, 2018, Tim changes the script so that windowing is done in # the call to sig.per, and not externally. Pdb = 10 * np.log10(np.median(Pxx,0)) #%% #============================================================================== # plt.plot(data[idx[100:103,:]].T) # plt.show() # plt.plot(DATA[100:103,:].T) # plt.show() # #%% # plt.plot(freqs, 10* np.log10(Pxx[100:103,:].T)) # plt.plot(freqs, Pdb, 'k') # plt.xscale('log') # plt.xlim([0.1, 100]) # plt.ylim([-220, -140]) #============================================================================== #%% # print('Ready to return results from get_med_spectra') return freqs, Pdb, Fs
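# Sketch of the Parseval check described in the comments above: when the Hann window
# is applied inside signal.periodogram, the returned densities are rescaled so that
# sum(Pxx) * df still matches the signal's mean-square power, whereas windowing the
# data yourself beforehand silently reduces the reported power (the ~2.67 factor
# mentioned above). Signal and rates here are illustrative.
import numpy as np
from scipy import signal

fs = 100
t = np.arange(0, 10, 1 / fs)
x = np.sin(2 * np.pi * 5 * t)

f, Pxx = signal.periodogram(x, fs=fs, window='hann', detrend=False)
df = f[1] - f[0]
print(np.sum(Pxx) * df, np.mean(x**2))    # both ~= 0.5

f, Pxx_pre = signal.periodogram(x * np.hanning(x.size), fs=fs, detrend=False)
print(np.sum(Pxx_pre) * df)               # noticeably smaller, by a factor of ~2.67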
else:
    soundFrame = rec.get_data()
    # print(soundFrame[0:22])
    # TODO: add proper header and trailer to data
    modulatedFrame = mod.modulateQAM16(soundFrame, isSignalUpconverted=True,
                                       debug=False)
    print(modulatedFrame[:100])
    # plt.plot(np.real(modulatedFrame))
    # plt.show()

    # TODO: remove cyclic buffer and feed data continuously
    sdr.tx(modulatedFrame)
    break

# TODO: separate tx and rx in threads
for _ in range(200):
    x = sdr.rx()
    f, Pxx_den = signal.periodogram(x, fs)
    plt.clf()
    plt.semilogy(f, Pxx_den)
    plt.ylim([1e-7, 1e2])
    plt.xlabel("frequency [Hz]")
    plt.ylabel("PSD [V**2/Hz]")
    plt.draw()
    plt.pause(0.05)
    sleep(0.1)

plt.show()
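# A reduced sketch of the receive-and-plot loop above, with synthetic IQ samples in
# place of sdr.rx() so it can run without SDR hardware attached. The sample rate and
# tone frequency are arbitrary assumptions; scipy returns a two-sided spectrum for
# the complex samples.
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

fs = 1_000_000
rng = np.random.default_rng(4)
for _ in range(5):
    n = np.arange(4096)
    x = np.exp(2j * np.pi * 100_000 * n / fs) + \
        0.1 * (rng.normal(size=n.size) + 1j * rng.normal(size=n.size))
    f, Pxx_den = signal.periodogram(x, fs)
    plt.clf()
    plt.semilogy(f, Pxx_den)
    plt.xlabel("frequency [Hz]")
    plt.ylabel("PSD [V**2/Hz]")
    plt.draw()
    plt.pause(0.05)
plt.show()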
def dmg_col_50_2D(colnum): #INITIALIZING STUFF Nmitral = 50 Ngranule = np.copy(Nmitral) #number of granule cells pg. 383 of Li/Hop Ndim = Nmitral + Ngranule #total number of cells # t_inh = 25 ; # time when inhalation starts # t_exh = 205; #time when exhalation starts # Ndamagetotal = Nmitral*2 + 1 #number of damage steps Ndamage = 3 Ncols = int(Nmitral / 2) #define number of columns to damage finalt = 395 # end time of the cycle #y = zeros(ndim,1); P_odor0 = np.zeros((Nmitral, 1)) #odor pattern, no odor P_odor1 = P_odor0 + .00429 #Odor pattern 1 # P_odor2 = 1/70*np.array([.6,.5,.5,.5,.3,.6,.4,.5,.5,.5]) # P_odor3 = 4/700*np.array([.7,.8,.5,1.2,.7,1.2,.8,.7,.8,.8]) #control_odor = control_order + .00429 #control_odor = np.zeros((Nmitral,1)) #odor input for adaptation #controllevel = 1 #1 is full adaptation H0 = np.zeros((Nmitral, Ngranule)) #weight matrix: to mitral from granule W0 = np.zeros((Ngranule, Nmitral)) #weights: to granule from mitral H0 = np.load('H0_50_2D_60Hz.npy') #load weight matrix W0 = np.load('W0_50_2D_60Hz.npy') #load weight matrix W0_tot = np.sum(W0) #get sum of the weights Wdamaged = np.copy(W0) #H0 = H0 + H0*np.random.rand(np.shape(H0)) #W0 = W0+W0*np.random.rand(np.shape(W0)) M = 5 #average over 5 trials for each level of damage #initialize iterative variables d1it, d2it, d3it, d4it = np.zeros(M), np.zeros(M), np.zeros(M), np.zeros(M) IPRit, IPR2it, pnit = np.zeros(M), np.zeros(M), np.zeros(M) frequencyit = np.zeros(M) pwrit = np.zeros(M) yout2, Sh2 = np.zeros((finalt, Ndim)), np.zeros((finalt, Ndim)) psi = np.copy(Sh2[:, :Nmitral]) #initialize quantities to be returned at end of the process dmgpct1 = np.zeros(Ncols * (Ndamage - 1) + 1) eigfreq1 = np.zeros(Ncols * (Ndamage - 1) + 1) d11 = np.zeros(Ncols * (Ndamage - 1) + 1) d21 = np.zeros(Ncols * (Ndamage - 1) + 1) d31 = np.zeros(Ncols * (Ndamage - 1) + 1) d41 = np.zeros(Ncols * (Ndamage - 1) + 1) pwr1 = np.zeros(Ncols * (Ndamage - 1) + 1) IPR1 = np.zeros(Ncols * (Ndamage - 1) + 1) IPR2 = np.zeros(Ncols * (Ndamage - 1) + 1) pn1 = np.zeros(Ncols * (Ndamage - 1) + 1) freq1 = np.zeros(Ncols * (Ndamage - 1) + 1) cell_act = np.zeros((finalt, Ndim, Ncols * (Ndamage - 1) + 1)) Omean1,Oosci1,Omeanbar1,Ooscibar1 = np.zeros((Nmitral,M)),\ np.zeros((Nmitral,M))+0j,np.zeros(M),np.zeros(M)+0j for m in np.arange(M): yout,y0out,Sh,t,OsciAmp1,Omean1[:,m],Oosci1[:,m],Omeanbar1[m],\ Ooscibar1[m],freq0,maxlam = olf_bulb_10(Nmitral,H0,W0,P_odor1) counter = 0 #to get the right index for each of the measures for col in range(Ncols): cols = int(np.mod(colnum + col, Nmitral)) for lv in np.arange(Ndamage): #reinitialize all iterative variables to zero (really only need to do for distance measures, but good habit) d1it, d2it, d3it, d4it = np.zeros(M), np.zeros(M), np.zeros( M), np.zeros(M) IPRit, IPR2it, pnit = np.zeros(M), np.zeros(M), np.zeros(M) frequencyit = np.zeros(M) pwrit = np.zeros(M) if not ( lv == 0 and cols != colnum ): #if it's the 0th level for any but the original col, skip Wdamaged[:, cols] = W0[:, cols] * (1 - lv * (0.5)) Wdamaged[Wdamaged < 0] = 0 for m in np.arange(M): #Then get respons of damaged network yout2[:,:],y0out2,Sh2[:,:],t2,OsciAmp2,Omean2,Oosci2,Omeanbar2,\ Ooscibar2,freq2,grow_eigs2 = olf_bulb_10(Nmitral,H0,Wdamaged,P_odor1) print(colnum, ' lv ', lv, 'after ', time.time() - tm1) #calculate distance measures, comparing to 5 control trials for i in np.arange(M): d1it[m] += 1 - Omean1[:, m].dot(Omean2) / ( lin.norm(Omean1[:, m]) * lin.norm(Omean2)) d2it[m] += 1 - lin.norm(Oosci1[:, m].dot( np.conjugate(Oosci2))) 
/ (lin.norm(Oosci1[:, m]) * lin.norm(Oosci2)) d3it[m] += (Omeanbar1[m] - Omeanbar2) / (Omeanbar1[m] + Omeanbar2) d4it[m] += np.real((Ooscibar1[m] - Ooscibar2) / (Ooscibar1[m] + Ooscibar2)) d1it[m] = d1it[ m] / M #average over comparison with the 5 control trials d2it[m] = d2it[m] / M d3it[m] = d3it[m] / M d4it[m] = d4it[m] / M #calculate spectral density and "wave function" to get average power and IPR P_den = np.zeros( (501, Nmitral)) #only calculate the spectral density from for i in np.arange( Nmitral ): #t=125 to t=250, during the main oscillations f, P_den[:, i] = signal.periodogram(Sh2[125:250, i], nfft=1000, fs=1000) psi = np.zeros(Nmitral) for p in np.arange(Nmitral): psi[p] = np.sum(P_den[:, p]) psi = psi / np.sqrt(np.sum(psi**2)) psi2 = np.copy(OsciAmp2) psi2 = psi2 / np.sqrt(np.sum(psi2**2)) maxAmp = np.max(OsciAmp2) pnit[m] = len(OsciAmp2[OsciAmp2 > maxAmp / 2]) IPRit[m] = 1 / np.sum(psi**4) IPR2it[m] = 1 / np.sum(psi2**4) pwrit[m] = np.sum(P_den) / Nmitral #get the frequency according to the adiabatic analysis maxargs = np.argmax(P_den, axis=0) argf = stats.mode(maxargs[maxargs != 0]) frequencyit[m] = f[argf[0][0]] # print(cols) # print(time.time()-tm1) # # print('level',lv) #Get the returned variables for each level of damage dmgpct1[counter] = np.sum(W0 - Wdamaged) / W0_tot IPR1[counter] = np.average(IPRit) #Had to do 1D list, so pwr1[counter] = np.average(pwrit) #it goes column 0 damage lvl freq1[counter] = np.average( frequencyit) #0,1,2,3,4...Ndamage-1, then #col 1 damage level 0,1,2... # IPRsd[lv]=np.std(IPRit) # pwrsd[lv]=np.std(pwrit) # freqsd[lv]=np.std(frequencyit) IPR2[counter] = np.average(IPR2it) pn1[counter] = np.average(pnit) d11[counter] = np.average(d1it) d21[counter] = np.average(d2it) d31[counter] = np.average(d3it) d41[counter] = np.average(d4it) # d1sd[lv] = np.std(d1it) # d2sd[lv] = np.std(d2it) # d3sd[lv]=np.std(d3it) # d4sd[lv]=np.std(d4it) eigfreq1[counter] = np.copy(freq2) cell_act[:, :, counter] = np.copy(yout2) counter += 1 return dmgpct1, eigfreq1, d11, d21, d31, d41, pwr1, IPR1, IPR2, pn1, freq1, cell_act
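# Sketch of the inverse participation ratio (IPR) computed above: the per-cell
# spectral power is collapsed into a normalized "wave function" psi, and
# IPR = 1 / sum(psi**4) counts how many cells take part in the oscillation
# (close to 1 if one cell dominates, approaching Nmitral if power is spread evenly).
# The activity segment here is fake noise standing in for Sh2[125:250, :].
import numpy as np
from scipy import signal

Nmitral = 50
activity = np.random.default_rng(5).normal(size=(125, Nmitral))

P_den = np.zeros((501, Nmitral))
for i in range(Nmitral):
    f, P_den[:, i] = signal.periodogram(activity[:, i], nfft=1000, fs=1000)

psi = np.sum(P_den, axis=0)
psi = psi / np.sqrt(np.sum(psi**2))
IPR = 1.0 / np.sum(psi**4)
print(IPR)    # near Nmitral for i.i.d. noise spread over all cells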
def power_spectrum(wave, fs):
    freq, P = signal.periodogram(wave, fs)
    return freq, P
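# Example use of power_spectrum above on a synthetic tone (assumes the function and
# its scipy `signal` import are in scope; fs is an arbitrary choice).
import numpy as np

fs = 1000
t = np.arange(0, 1, 1 / fs)
freq, P = power_spectrum(np.sin(2 * np.pi * 50 * t), fs)
print(freq[P.argmax()])    # ~= 50 Hz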
def main(): ################################################# ## SETUP # Initialize subject order run log subj_list = [] ## Get list of subject files subj_files = listdir(DATA_PATH) subj_files = [file for file in subj_files if EXT.lower() in file.lower()] subj_files = sorted(subj_files) ## Set up FOOOF Objects # Initialize FOOOF settings & objects objects fooof_settings = FOOOFSettings(peak_width_limits=PEAK_WIDTH_LIMITS, max_n_peaks=MAX_N_PEAKS, min_peak_height=MIN_PEAK_HEIGHT, peak_threshold=PEAK_THRESHOLD, aperiodic_mode=APERIODIC_MODE) fm = FOOOF(*fooof_settings, verbose=False) fg = FOOOFGroup(*fooof_settings, verbose=False) # Save out a settings file fg.save('0-FOOOF_Settings', pjoin(RESULTS_PATH, 'FOOOF'), save_settings=True) # Set up the dictionary to store all the FOOOF results fg_dict = dict() for load_label in LOAD_LABELS: fg_dict[load_label] = dict() for side_label in SIDE_LABELS: fg_dict[load_label][side_label] = dict() for seg_label in SEG_LABELS: fg_dict[load_label][side_label][seg_label] = [] ## Initialize group level data stores n_subjs, n_conds, n_times = len(subj_files), 3, N_TIMES group_fooof_alpha_freqs = np.zeros(shape=[n_subjs]) group_indi_alpha_freqs = np.zeros(shape=[n_subjs]) dropped_components = np.ones(shape=[n_subjs, 50]) * 999 dropped_trials = np.ones(shape=[n_subjs, 1500]) * 999 canonical_group_avg_data = np.zeros(shape=[n_subjs, n_conds, n_times]) canonical_icf_group_avg_data = np.zeros(shape=[n_subjs, n_conds, n_times]) # Set channel types ch_types = { 'LHor': 'eog', 'RHor': 'eog', 'IVer': 'eog', 'SVer': 'eog', 'LMas': 'misc', 'RMas': 'misc', 'Nose': 'misc', 'EXG8': 'misc' } ################################################# ## RUN ACROSS ALL SUBJECTS # Run analysis across each subject for s_ind, subj_file in enumerate(subj_files): # Get subject label and print status subj_label = subj_file.split('.')[0] subj_list.append(subj_label) print('\nCURRENTLY RUNNING SUBJECT: ', subj_label, '\n') ################################################# ## LOAD / ORGANIZE / SET-UP DATA # Load subject of data, apply apply fixes for channels, etc eeg_data = mne.io.read_raw_bdf(pjoin(DATA_PATH, subj_file), preload=True, verbose=False) # Fix channel name labels eeg_data.info['ch_names'] = [chl[2:] for chl in \ eeg_data.ch_names[:-1]] + [eeg_data.ch_names[-1]] for ind, chi in enumerate(eeg_data.info['chs']): eeg_data.info['chs'][ind]['ch_name'] = eeg_data.info['ch_names'][ ind] # Update channel types eeg_data.set_channel_types(ch_types) # Set reference - average reference eeg_data = eeg_data.set_eeg_reference(ref_channels='average', projection=False, verbose=False) # Set channel montage chs = mne.channels.make_standard_montage('standard_1020') eeg_data.set_montage(chs, verbose=False) # Get event information & check all used event codes evs = mne.find_events(eeg_data, shortest_event=1, verbose=False) # Pull out sampling rate srate = eeg_data.info['sfreq'] ################################################# ## Pre-Processing: ICA # High-pass filter data for running ICA eeg_data.filter(l_freq=1., h_freq=None, fir_design='firwin') if RUN_ICA: print("\nICA: CALCULATING SOLUTION\n") # ICA settings method = 'fastica' n_components = 0.99 random_state = 47 reject = {'eeg': 20e-4} # Initialize ICA object ica = ICA(n_components=n_components, method=method, random_state=random_state) # Fit ICA ica.fit(eeg_data, reject=reject) # Save out ICA solution ica.save(pjoin(RESULTS_PATH, 'ICA', subj_label + '-ica.fif')) # Otherwise: load previously saved ICA to apply else: print("\nICA: USING 
PRECOMPUTED\n") ica = read_ica(pjoin(RESULTS_PATH, 'ICA', subj_label + '-ica.fif')) # Find components to drop, based on correlation with EOG channels drop_inds = [] for chi in EOG_CHS: inds, _ = ica.find_bads_eog(eeg_data, ch_name=chi, threshold=2.5, l_freq=1, h_freq=10, verbose=False) drop_inds.extend(inds) drop_inds = list(set(drop_inds)) # Set which components to drop, and collect record of this ica.exclude = drop_inds dropped_components[s_ind, 0:len(drop_inds)] = drop_inds # Apply ICA to data eeg_data = ica.apply(eeg_data) ################################################# ## SORT OUT EVENT CODES # Extract a list of all the event labels all_trials = [it for it2 in EV_DICT.values() for it in it2] # Create list of new event codes to be used to label correct trials (300s) all_trials_new = [it + 100 for it in all_trials] # This is an annoying way to collapse across the doubled event markers from above all_trials_new = [it - 1 if not ind % 2 == 0 else it \ for ind, it in enumerate(all_trials_new)] # Get labelled dictionary of new event names ev_dict2 = { ke: va for ke, va in zip(EV_DICT.keys(), set(all_trials_new)) } # Initialize variables to store new event definitions evs2 = np.empty(shape=[0, 3], dtype='int64') lags = np.array([]) # Loop through, creating new events for all correct trials t_min, t_max = -0.4, 3.0 for ref_id, targ_id, new_id in zip(all_trials, CORR_CODES * 6, all_trials_new): t_evs, t_lags = mne.event.define_target_events( evs, ref_id, targ_id, srate, t_min, t_max, new_id) if len(t_evs) > 0: evs2 = np.vstack([evs2, t_evs]) lags = np.concatenate([lags, t_lags]) # Sort event codes evs2 = np.sort(evs2, 0) ################################################# ## FOOOF - resting state data # Calculate PSDs over first 2 minutes of data fmin, fmax = 1, 50 tmin, tmax = 5, 125 psds, freqs = mne.time_frequency.psd_welch(eeg_data, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, n_fft=int(2 * srate), n_overlap=int(srate), n_per_seg=int(2 * srate), verbose=False) # Fit FOOOF across all channels fg.fit(freqs, psds, FREQ_RANGE) # Collect individual alpha peak from fooof ch_ind = eeg_data.ch_names.index(CHL) tfm = fg.get_fooof(ch_ind, False) fooof_freq, _, _ = get_band_peak_fm(tfm, BANDS.alpha) group_fooof_alpha_freqs[s_ind] = fooof_freq # Save out FOOOF results fg.save(subj_label + '_fooof', pjoin(RESULTS_PATH, 'FOOOF'), save_data=True, save_results=True) ################################################# ## ALPHA FILTERING - CANONICAL ALPHA # CANONICAL: Filter data to canonical alpha band: 8-12 Hz alpha_data = eeg_data.copy() alpha_data.filter(8, 12, fir_design='firwin', verbose=False) alpha_data.apply_hilbert(envelope=True, verbose=False) ################################################# ## ALPHA FILTERING - INDIVIDUALIZED PEAK ALPHA # Get individual power spectrum of interest cur_psd = psds[ch_ind, :] # Get the peak within the alpha range al_freqs, al_psd = trim_spectrum(freqs, cur_psd, [7, 14]) icf_ind = np.argmax(al_psd) subj_icf = al_freqs[icf_ind] # Collect individual alpha peak group_indi_alpha_freqs[s_ind] = subj_icf # CANONICAL: Filter data to individualized alpha alpha_icf_data = eeg_data.copy() alpha_icf_data.filter(subj_icf - 2, subj_icf + 2, fir_design='firwin', verbose=False) alpha_icf_data.apply_hilbert(envelope=True, verbose=False) ################################################# ## EPOCH TRIALS # Set epoch timings tmin, tmax = -0.85, 1.1 baseline = (-0.5, -0.35) # Epoch trials - raw data for trial rejection epochs = mne.Epochs(eeg_data, evs2, ev_dict2, tmin=tmin, tmax=tmax, 
baseline=None, preload=True, verbose=False) # Epoch trials - canonical alpha filtered version epochs_alpha = mne.Epochs(alpha_data, evs2, ev_dict2, tmin=tmin, tmax=tmax, baseline=baseline, preload=True, verbose=False) # Epoch trials - individualized alpha filtered version epochs_alpha_icf = mne.Epochs(alpha_icf_data, evs2, ev_dict2, tmin=tmin, tmax=tmax, baseline=baseline, preload=True, verbose=False) ################################################# ## PRE-PROCESSING: AUTO-REJECT if RUN_AUTOREJECT: print('\nAUTOREJECT: CALCULATING SOLUTION\n') # Initialize and run autoreject across epochs ar = AutoReject(n_jobs=4, verbose=False) ar.fit(epochs) # Save out AR solution ar.save(pjoin(RESULTS_PATH, 'AR', subj_label + '-ar.hdf5'), overwrite=True) # Otherwise: load & apply previously saved AR solution else: print('\nAUTOREJECT: USING PRECOMPUTED\n') ar = read_auto_reject( pjoin(RESULTS_PATH, 'AR', subj_label + '-ar.hdf5')) ar.verbose = 'tqdm' # Apply autoreject to the original epochs object it was learnt on epochs, rej_log = ar.transform(epochs, return_log=True) # Apply autoreject to the copies of the data - apply interpolation, then drop same epochs _apply_interp(rej_log, epochs_alpha, ar.threshes_, ar.picks_, ar.dots, ar.verbose) epochs_alpha.drop(rej_log.bad_epochs) _apply_interp(rej_log, epochs_alpha_icf, ar.threshes_, ar.picks_, ar.dots, ar.verbose) epochs_alpha_icf.drop(rej_log.bad_epochs) # Collect which epochs were dropped dropped_trials[s_ind, 0:sum(rej_log.bad_epochs)] = np.where( rej_log.bad_epochs)[0] ################################################# ## SET UP CHANNEL CLUSTERS # Set channel clusters - take channels contralateral to stimulus presentation # Note: channels will be used to extract data contralateral to stimulus presentation le_chs = ['P3', 'P5', 'P7', 'P9', 'O1', 'PO3', 'PO7'] # Left Side Channels le_inds = [epochs.ch_names.index(chn) for chn in le_chs] ri_chs = ['P4', 'P6', 'P8', 'P10', 'O2', 'PO4', 'PO8'] # Right Side Channels ri_inds = [epochs.ch_names.index(chn) for chn in ri_chs] ################################################# ## TRIAL-RELATED ANALYSIS: CANONICAL ALPHA ## Pull out channels of interest for each load level # Channels extracted are those contralateral to stimulus presentation # Canonical Data lo1_a = np.concatenate([ epochs_alpha['LeLo1']._data[:, ri_inds, :], epochs_alpha['RiLo1']._data[:, le_inds, :] ], 0) lo2_a = np.concatenate([ epochs_alpha['LeLo2']._data[:, ri_inds, :], epochs_alpha['RiLo2']._data[:, le_inds, :] ], 0) lo3_a = np.concatenate([ epochs_alpha['LeLo3']._data[:, ri_inds, :], epochs_alpha['RiLo3']._data[:, le_inds, :] ], 0) ## Calculate average across trials and channels - add to group data collection # Canonical data canonical_group_avg_data[s_ind, 0, :] = np.mean(lo1_a, 1).mean(0) canonical_group_avg_data[s_ind, 1, :] = np.mean(lo2_a, 1).mean(0) canonical_group_avg_data[s_ind, 2, :] = np.mean(lo3_a, 1).mean(0) ################################################# ## TRIAL-RELATED ANALYSIS: INDIVIDUALIZED ALPHA # Individualized Alpha Data lo1_a_icf = np.concatenate([ epochs_alpha_icf['LeLo1']._data[:, ri_inds, :], epochs_alpha_icf['RiLo1']._data[:, le_inds, :] ], 0) lo2_a_icf = np.concatenate([ epochs_alpha_icf['LeLo2']._data[:, ri_inds, :], epochs_alpha_icf['RiLo2']._data[:, le_inds, :] ], 0) lo3_a_icf = np.concatenate([ epochs_alpha_icf['LeLo3']._data[:, ri_inds, :], epochs_alpha_icf['RiLo3']._data[:, le_inds, :] ], 0) ## Calculate average across trials and channels - add to group data collection # Canonical data 
canonical_icf_group_avg_data[s_ind, 0, :] = np.mean(lo1_a_icf, 1).mean(0) canonical_icf_group_avg_data[s_ind, 1, :] = np.mean(lo2_a_icf, 1).mean(0) canonical_icf_group_avg_data[s_ind, 2, :] = np.mean(lo3_a_icf, 1).mean(0) ################################################# ## FOOOFING TRIAL AVERAGED DATA # Loop loop loads & trials segments for seg_label, seg_time in zip(SEG_LABELS, SEG_TIMES): tmin, tmax = seg_time[0], seg_time[1] # Calculate PSDs across trials, fit FOOOF models to averages for le_label, ri_label, load_label in zip( ['LeLo1', 'LeLo2', 'LeLo3'], ['RiLo1', 'RiLo2', 'RiLo3'], LOAD_LABELS): ## Calculate trial wise PSDs for left & right side trials trial_freqs, le_trial_psds = periodogram( epochs[le_label]. _data[:, :, _time_mask(epochs.times, tmin, tmax, srate)], srate, window='hann', nfft=4 * srate) trial_freqs, ri_trial_psds = periodogram( epochs[ri_label]. _data[:, :, _time_mask(epochs.times, tmin, tmax, srate)], srate, window='hann', nfft=4 * srate) ## FIT ALL CHANNELS VERSION if FIT_ALL_CHANNELS: ## Average spectra across trials within a given load & side le_avg_psd_contra = AVG_FUNC(le_trial_psds[:, ri_inds, :], 0) le_avg_psd_ipsi = AVG_FUNC(le_trial_psds[:, le_inds, :], 0) ri_avg_psd_contra = AVG_FUNC(ri_trial_psds[:, le_inds, :], 0) ri_avg_psd_ipsi = AVG_FUNC(ri_trial_psds[:, ri_inds, :], 0) ## Combine spectra across left & right trials for given load ch_psd_contra = np.vstack( [le_avg_psd_contra, ri_avg_psd_contra]) ch_psd_ipsi = np.vstack([le_avg_psd_ipsi, ri_avg_psd_ipsi]) ## Fit FOOOFGroup to all channels, average & and collect results fg.fit(trial_freqs, ch_psd_contra, FREQ_RANGE) afm = average_fg(fg, BANDS) fg_dict[load_label]['Contra'][seg_label].append(afm.copy()) fg.fit(trial_freqs, ch_psd_ipsi, FREQ_RANGE) afm = average_fg(fg, BANDS) fg_dict[load_label]['Ipsi'][seg_label].append(afm.copy()) ## COLLAPSE ACROSS CHANNELS VERSION else: ## Average spectra across trials and channels within a given load & side le_avg_psd_contra = AVG_FUNC( AVG_FUNC(le_trial_psds[:, ri_inds, :], 0), 0) le_avg_psd_ipsi = AVG_FUNC( AVG_FUNC(le_trial_psds[:, le_inds, :], 0), 0) ri_avg_psd_contra = AVG_FUNC( AVG_FUNC(ri_trial_psds[:, le_inds, :], 0), 0) ri_avg_psd_ipsi = AVG_FUNC( AVG_FUNC(ri_trial_psds[:, ri_inds, :], 0), 0) ## Collapse spectra across left & right trials for given load avg_psd_contra = AVG_FUNC( np.vstack([le_avg_psd_contra, ri_avg_psd_contra]), 0) avg_psd_ipsi = AVG_FUNC( np.vstack([le_avg_psd_ipsi, ri_avg_psd_ipsi]), 0) ## Fit FOOOF, and collect results fm.fit(trial_freqs, avg_psd_contra, FREQ_RANGE) fg_dict[load_label]['Contra'][seg_label].append(fm.copy()) fm.fit(trial_freqs, avg_psd_ipsi, FREQ_RANGE) fg_dict[load_label]['Ipsi'][seg_label].append(fm.copy()) ################################################# ## SAVE OUT RESULTS # Save out subject run log with open(pjoin(RESULTS_PATH, 'Group', 'subj_run_list.txt'), 'w') as f_obj: for item in subj_list: f_obj.write('{} \n'.format(item)) # Save out group data np.save(pjoin(RESULTS_PATH, 'Group', 'canonical_group'), canonical_group_avg_data) np.save(pjoin(RESULTS_PATH, 'Group', 'canonical_icf_group'), canonical_icf_group_avg_data) np.save(pjoin(RESULTS_PATH, 'Group', 'dropped_trials'), dropped_trials) np.save(pjoin(RESULTS_PATH, 'Group', 'dropped_components'), dropped_components) np.save(pjoin(RESULTS_PATH, 'Group', 'indi_alpha_peaks'), group_indi_alpha_freqs) np.save(pjoin(RESULTS_PATH, 'Group', 'fooof_alpha_peaks'), group_fooof_alpha_freqs) # Save out second round of FOOOFing for load_label in LOAD_LABELS: for side_label 
in SIDE_LABELS: for seg_label in SEG_LABELS: fg = combine_fooofs(fg_dict[load_label][side_label][seg_label]) fg.save('Group_' + load_label + '_' + side_label + '_' + seg_label, pjoin(RESULTS_PATH, 'FOOOF'), save_results=True)
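# Sketch of the trial-wise PSD step used in main() above: scipy.signal.periodogram
# applied to an (n_trials, n_channels, n_times) array along the last axis, with a
# Hann window and nfft = 4 * srate for 0.25 Hz resolution. Shapes and the sampling
# rate here are illustrative, not taken from the study.
import numpy as np
from scipy.signal import periodogram

srate = 512
epochs_data = np.random.default_rng(6).normal(size=(40, 7, 2 * srate))  # trials x chans x times
freqs, psds = periodogram(epochs_data, srate, window='hann', nfft=4 * srate)
print(psds.shape)    # (40, 7, 2 * srate + 1): one spectrum per trial and channel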
for k in range(len(time)):
    if k > 1:
        enso_ra[k] = np.mean(enso[k - 2:k + 1])
    elif k > 0:
        enso_ra[k] = np.mean(enso[k - 1:k + 1])
    else:
        enso_ra[k] = enso[k]

# remove the mean annual cycle, month by month
for mons in range(12):
    gm_mon = np.mean(enso_ra[mons::12])
    enso_ra[mons::12] = enso_ra[mons::12] - gm_mon

#
fig = plt.figure(figsize=(8, 4))
gs = gridspec.GridSpec(1, 2)
gs.update(wspace=0.1)
ax = fig.add_subplot(gs[0])
f, Pxx_den = signal.periodogram(enso_ra[:480], 12.)
# Autocorr lag1
#plt.plot(f, Pxx_den, color='#EF3340', linewidth=2)
ax.set_xlabel(r'Frequency / yr$^{-1}$')
ax.set_ylabel('Spectral Power Density')
#plt.ylim([9, 8.E7])
ax.set_xlim([0, 1.0])
print(str(round(1.0 / (f[np.where(Pxx_den == np.max(Pxx_den))[0][0]]), 2)))
print(str(round(1.0 / (f[np.where(Pxx_den == second_largest(Pxx_den))[0][0]]), 2)))

# Data
sst_new = np.genfromtxt(
    '/group_workspaces/jasmin2/ukca/vol2/dcw32/Obs/NINO_34.1870-2008.ANOM.txt')
sst_new = np.reshape(sst_new[:, 1:], sst_new.shape[0] * 12)
print(sst_new.shape)
enso_ra = np.zeros(sst_new.shape[0])
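# Sketch of the period estimate printed above: the dominant ENSO period is taken as
# 1/f at the periodogram maximum of the monthly anomaly series, with fs = 12 samples
# per year. The series here is a synthetic 4-year oscillation plus noise.
import numpy as np
from scipy import signal

rng = np.random.default_rng(7)
months = np.arange(480)
enso_like = np.sin(2 * np.pi * months / 48.0) + 0.3 * rng.normal(size=months.size)

f, Pxx_den = signal.periodogram(enso_like, 12.)     # frequencies in cycles per year
peak = f[np.argmax(Pxx_den)]
print(round(1.0 / peak, 2))                          # ~= 4.0 years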