def wave_width(wave, peak, thresh=0.25):
    """Return the width of a waveform at thresh times the peak value."""
    p_loc, p_val = peak
    Len = wave.size
    if p_loc:
        # Last sample before the peak that is below the threshold
        w_start = find(wave[:p_loc] <= thresh * p_val, 1, "last")
        w_start = w_start[0] if w_start.size else 0
        # First sample after the peak that falls back below the threshold
        w_end = find(wave[p_loc:] <= thresh * p_val, 1, "first")
        w_end = p_loc + w_end[0] if w_end.size else Len
    else:
        w_start = 1
        w_end = Len
    return w_end - w_start
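# A minimal usage sketch for wave_width with made-up data. It assumes
# `find` (from neurochat.nc_utils) is available in this module, returning
# the indices where the condition holds, limited to the first or last
# match as requested.
import numpy as np

demo_wave = np.array([0.0, 1.0, 4.0, 9.0, 4.0, 1.0, 0.0])
demo_peak = (3, 9.0)  # (sample index of the peak, peak amplitude)
# Width in samples of the region above 25% of the peak amplitude
print(wave_width(demo_wave, demo_peak, thresh=0.25))  # -> 4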
def get_wave_pc(self, npc=2):
    """
    Return the principal components of the waveforms.

    Parameters
    ----------
    npc : int
        Number of principal components from waveforms of each channel

    Returns
    -------
    pc : ndarray
        Principal components (num_waves X npc*num_channels)

    """
    wave = self.get_waveform()
    pc = np.array([])
    for key, w in wave.items():
        pca = PCA(n_components=5)
        w_new = pca.fit_transform(w)
        pc_var = pca.explained_variance_ratio_

        if npc and npc < w_new.shape[1]:
            w_new = w_new[:, :npc]
        else:
            # Keep as many components as needed to explain 95% of variance
            w_new = w_new[:, 0:(
                find(np.cumsum(pc_var) >= 0.95, 1, 'first')[0] + 1)]

        if not len(pc):
            pc = w_new
        else:
            pc = np.append(pc, w_new, axis=1)
    return pc
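# A standalone sketch of the 95%-variance fallback used above, on
# synthetic low-rank data (numpy and scikit-learn only; no class
# instance needed):
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
latent = rng.normal(size=(200, 3))          # 3 underlying components
mixing = rng.normal(size=(3, 32))
w = latent @ mixing + 0.01 * rng.normal(size=(200, 32))  # 200 waveforms

pca = PCA(n_components=5)
w_new = pca.fit_transform(w)
cum_var = np.cumsum(pca.explained_variance_ratio_)
n_keep = int(np.argmax(cum_var >= 0.95)) + 1  # same idea as find(..., 'first')
w_new = w_new[:, :n_keep]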
def cluster_separation(self, unit_no=0):
    """
    Measure the separation of a specific unit from other clusters.

    This is performed quantitatively using the following:
    1. Bhattacharyya coefficient
    2. Hellinger distance

    Parameters
    ----------
    unit_no : int
        Unit of interest. If 0, pairwise comparisons
        of all units are returned.

    Returns
    -------
    (bc, dh) : (ndarray, ndarray)
        bc : ndarray
            Bhattacharyya coefficient
        dh : ndarray
            Hellinger distance

    """
    # If unit_no == 0, return matrices for pairwise comparison of all
    # units; otherwise compare the specified unit to every other unit.
    feat = self.get_feat()
    unit_list = self.get_unit_list()
    n_units = len(unit_list)

    if unit_no == 0:
        bc = np.zeros([n_units, n_units])
        dh = np.zeros([n_units, n_units])
        for c1 in np.arange(n_units):
            for c2 in np.arange(n_units):
                X1 = feat[self.get_unit_tags() == unit_list[c1], :]
                X2 = feat[self.get_unit_tags() == unit_list[c2], :]
                bc[c1, c2] = bhatt(X1, X2)[0]
                dh[c1, c2] = hellinger(X1, X2)
        return bc, dh
    else:
        bc = np.zeros(n_units)
        dh = np.zeros(n_units)
        X1 = feat[self.get_unit_tags() == unit_no, :]
        for c2 in np.arange(n_units):
            # Compare against the unit tag, not the loop index
            if unit_list[c2] == unit_no:
                bc[c2] = 0
                dh[c2] = 1
            else:
                X2 = feat[self.get_unit_tags() == unit_list[c2], :]
                bc[c2] = bhatt(X1, X2)[0]
                dh[c2] = hellinger(X1, X2)
        idx = find(np.array(unit_list) != unit_no)
        return bc[idx], dh[idx]
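# A usage sketch (hypothetical: `clust` stands for an instance of this
# class with spike-sorted features loaded):
#     bc, dh = clust.cluster_separation(unit_no=0)   # pairwise matrices
#     bc1, dh1 = clust.cluster_separation(unit_no=1) # unit 1 vs the rest
# Well-separated clusters give a low Bhattacharyya coefficient and a
# Hellinger distance close to 1 (dh = sqrt(1 - bc)).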
def grid(grid_data):
    """
    Plot the results from grid analysis.

    Parameters
    ----------
    grid_data : dict
        Graphical data from grid analysis

    Returns
    -------
    fig1 : matplotlib.pyplot.Figure
        Autocorrelation of firing rate map, superimposed with central peaks
    fig2 : matplotlib.pyplot.Figure
        Rotational correlation of autocorrelation map

    """
    fig1 = loc_auto_corr(grid_data)
    ax = fig1.axes[0]

    xmax = grid_data['xmax']
    ymax = grid_data['ymax']
    xshift = grid_data['xshift']

    # Mark the six central peaks and join them into a hexagon
    ax.scatter(xmax, ymax, c='black', marker='s', zorder=2)
    for i in range(xmax.size):
        if i < xmax.size - 1:
            ax.plot([xmax[i], xmax[i + 1]], [ymax[i], ymax[i + 1]],
                    'k', linewidth=2)
        else:
            ax.plot([xmax[i], xmax[0]], [ymax[i], ymax[0]],
                    'k', linewidth=2)
    ax.plot(xshift[xshift >= 0], np.zeros(find(xshift >= 0).size),
            'k--', linewidth=2)
    ax.plot(xshift[xshift >= 0],
            xshift[xshift >= 0] * ymax[0] / xmax[0], 'k--', linewidth=2)
    ax.set_title('Grid cell analysis')
    ax.set_xlim([grid_data['xshift'].min(), grid_data['xshift'].max()])
    ax.set_ylim([grid_data['yshift'].min(), grid_data['yshift'].max()])
    ax.invert_yaxis()

    fig2 = None
    if 'rotAngle' in grid_data.keys() and 'rotCorr' in grid_data.keys():
        fig2 = rot_corr(grid_data)
        ax = fig2.axes[0]
        rmax = grid_data['rotCorr'].max()
        rmin = grid_data['rotCorr'].min()
        # Mark correlation peaks (red) and troughs (green)
        for i, th in enumerate(grid_data['anglemax']):
            ax.plot([th, th], [rmin, rmax], 'r--', linewidth=1)
        for i, th in enumerate(grid_data['anglemin']):
            ax.plot([th, th], [rmin, rmax], 'g--', linewidth=1)
        ax.autoscale(enable=True, axis='both', tight=True)
        ax.set_title('Rotational correlation of autocorrelation map')

    if fig2:
        return fig1, fig2
    else:
        return fig1
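# A usage sketch (hypothetical names: `spatial.grid(ftimes)` stands for
# whichever analysis method produced the graphical-data dict consumed
# here):
#     grid_data = spatial.grid(ftimes)
#     fig1, fig2 = grid(grid_data)  # two figures when rotational data exists
#     fig1.savefig('grid_autocorr.png')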
def circ_scatter(self, bins=2, step=0.05, rmax=None):
    """
    Prepare data for a circular scatter plot.

    For each theta in a bin, the radius is increased by 'step'.
    The radius is capped at 'rmax'.

    Parameters
    ----------
    bins : int
        Angular binsize for the circular scatter
    step : float
        Stepsize to increase the radius for each count of theta
    rmax : float
        Maximum value for the radius

    Returns
    -------
    radius : ndarray
        Radius for the theta values. For each new theta in a bin,
        the radius is increased by 'step'.
    theta : ndarray
        Binned theta samples

    """
    count, ind, bins = self.circ_histogram(bins=bins)
    radius = np.ones(ind.shape)
    theta = np.zeros(ind.shape)
    for i, b in enumerate(bins):
        # Stack repeated samples in the same bin at increasing radii
        rad = (
            np.ones(find(ind == i).shape)
            + np.array(
                list(step * j for j, loc in enumerate(find(ind == i))))
        )
        if rmax:
            rad[rad > rmax] = rmax
        radius[ind == i] = rad
        theta[ind == i] = b
    return radius, theta
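# A usage sketch (hypothetical: `cs` is a CircStat instance with theta
# samples, in degrees, already set via set_theta; assumes matplotlib
# imported as plt):
#     radius, theta = cs.circ_scatter(bins=5, step=0.05, rmax=2.0)
#     ax = plt.subplot(111, projection='polar')
#     ax.scatter(np.radians(theta), radius, s=4)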
def grid_down(self, ftimes, other_spatial, other_ftimes, **kwargs):
    """
    Perform grid cell analysis after downsampling.

    Grid cells are characterised by the formation of a grid-like
    pattern of high activity in the firing-rate map.

    Parameters
    ----------
    ftimes : ndarray
        Timestamps of the spiking activity of a unit
    other_spatial : NSpatial
        The spatial data to downsample to.
    other_ftimes : list or ndarray
        The firing times of the cell in other spatial.
    **kwargs
        Keyword arguments

    Returns
    -------
    dict
        Graphical data of the analysis

    """
    _results = oDict()
    tol = kwargs.get('angtol', 2)
    binsize = kwargs.get('binsize', 3)
    bins = np.arange(0, 360, binsize)

    graph_data = self.loc_auto_corr_down(
        ftimes, other_spatial, other_ftimes, update=False, **kwargs)
    corrMap = graph_data['corrMap']
    corrMap[np.isnan(corrMap)] = 0
    xshift = graph_data['xshift']
    yshift = graph_data['yshift']

    pixel = int(np.diff(xshift).mean())

    ny, nx = corrMap.shape
    rpeaks = np.zeros(corrMap.shape, dtype=bool)
    cpeaks = np.zeros(corrMap.shape, dtype=bool)
    # Local maxima along rows and columns; their intersection gives
    # the 2D peaks of the autocorrelation map
    for j in np.arange(ny):
        rpeaks[j, extrema(corrMap[j, :])[1]] = True
    for i in np.arange(nx):
        cpeaks[extrema(corrMap[:, i])[1], i] = True
    ymax, xmax = find2d(np.logical_and(rpeaks, cpeaks))

    # Keep the six peaks closest to the centre, excluding the centre itself
    peakDist = np.sqrt((ymax - find(yshift == 0))**2
                       + (xmax - find(xshift == 0))**2)
    sortInd = np.argsort(peakDist)
    ymax, xmax, peakDist = ymax[sortInd], xmax[sortInd], peakDist[sortInd]
    ymax, xmax, peakDist = (
        (ymax[1:7], xmax[1:7], peakDist[1:7])
        if ymax.size >= 7 else ([], [], []))

    theta = np.arctan2(yshift[ymax], xshift[xmax]) * 180 / np.pi
    theta[theta < 0] += 360
    sortInd = np.argsort(theta)
    ymax, xmax, peakDist, theta = (
        ymax[sortInd], xmax[sortInd], peakDist[sortInd], theta[sortInd])

    graph_data['ymax'] = yshift[ymax]
    graph_data['xmax'] = xshift[xmax]

    meanDist = peakDist.mean()
    X, Y = np.meshgrid(xshift, yshift)
    distMat = np.sqrt(X**2 + Y**2) / pixel

    # Correlate the map with rotated copies of itself, within an annulus
    # around the ring of peaks
    maskInd = np.logical_and(
        distMat > 0.5 * meanDist, distMat < 1.5 * meanDist)
    rotCorr = np.array(
        [corr_coeff(rot_2d(corrMap, angle)[maskInd], corrMap[maskInd])
         for angle in bins])

    ramax, rimax, ramin, rimin = extrema(rotCorr)
    mThetaPk, mThetaTr = (
        (np.diff(bins[rimax]).mean(), np.diff(bins[rimin]).mean())
        if rimax.size and rimin.size else (None, None))
    graph_data['rimax'] = rimax
    graph_data['rimin'] = rimin
    graph_data['anglemax'] = bins[rimax]
    graph_data['anglemin'] = bins[rimin]
    graph_data['rotAngle'] = bins
    graph_data['rotCorr'] = rotCorr

    if mThetaPk is not None and mThetaTr is not None:
        isGrid = (60 - tol < mThetaPk < 60 + tol
                  and 60 - tol < mThetaTr < 60 + tol)
    else:
        isGrid = False

    meanAlpha = np.diff(theta).mean()
    psi = theta[np.array([2, 3, 4, 5, 0, 1])] - theta
    psi[psi < 0] += 360
    meanPsi = psi.mean()

    _results["First Check"] = (len(ymax) == np.logical_and(
        peakDist > 0.75 * meanDist, peakDist < 1.25 * meanDist).sum())
    _results['Is Grid'] = (isGrid and 120 - tol < meanPsi < 120 + tol
                           and 60 - tol < meanAlpha < 60 + tol)
    _results['Grid Mean Alpha'] = meanAlpha
    _results['Grid Mean Psi'] = meanPsi
    _results['Grid Spacing'] = meanDist * pixel
    # Difference between highest Pearson R at peaks and lowest at troughs
    _results['Grid Score'] = rotCorr[rimax].max() - rotCorr[rimin].min()
    _results['Grid Orientation'] = theta[0]

    self.update_result(_results)
    return graph_data
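# A usage sketch (hypothetical objects: `spatial` is an instance of this
# class, downsampled against a second session before the grid analysis;
# the results accessor name is assumed):
#     graph_data = spatial.grid_down(
#         ftimes, other_spatial, other_ftimes, angtol=2, binsize=3)
# A hexagonal grid gives rotational-correlation peaks roughly every 60
# degrees, so 'Is Grid' also requires mean alpha near 60 and mean psi
# near 120.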
def always_grid(self, ftimes, **kwargs):
    """
    Output grid cell statistics even if the cell is clearly not a grid cell.

    Using NeuroChaT's regular grid method is recommended; this version
    exists to test the statistics in non-grid situations.
    Grid cells are characterised by the formation of a grid-like pattern
    of high activity in the firing-rate map.

    Parameters
    ----------
    ftimes : ndarray
        Timestamps of the spiking activity of a unit
    **kwargs
        Keyword arguments

    Returns
    -------
    dict
        Graphical data of the analysis

    """
    _results = oDict()
    tol = kwargs.get("angtol", 2)
    binsize = kwargs.get("binsize", 3)
    bins = np.arange(0, 360, binsize)

    graph_data = self.loc_auto_corr(ftimes, update=False, **kwargs)
    corrMap = graph_data["corrMap"]
    corrMap[np.isnan(corrMap)] = 0
    xshift = graph_data["xshift"]
    yshift = graph_data["yshift"]

    pixel = int(np.diff(xshift).mean())

    ny, nx = corrMap.shape
    rpeaks = np.zeros(corrMap.shape, dtype=bool)
    cpeaks = np.zeros(corrMap.shape, dtype=bool)
    for j in np.arange(ny):
        rpeaks[j, extrema(corrMap[j, :])[1]] = True
    for i in np.arange(nx):
        cpeaks[extrema(corrMap[:, i])[1], i] = True
    ymax, xmax = find2d(np.logical_and(rpeaks, cpeaks))

    peakDist = np.sqrt((ymax - find(yshift == 0))**2
                       + (xmax - find(xshift == 0))**2)
    sortInd = np.argsort(peakDist)
    ymax, xmax, peakDist = ymax[sortInd], xmax[sortInd], peakDist[sortInd]
    ymax, xmax, peakDist = ((ymax[1:7], xmax[1:7], peakDist[1:7])
                            if ymax.size >= 7 else ([], [], []))

    theta = np.arctan2(yshift[ymax], xshift[xmax]) * 180 / np.pi
    theta[theta < 0] += 360
    sortInd = np.argsort(theta)
    ymax, xmax, peakDist, theta = (
        ymax[sortInd], xmax[sortInd], peakDist[sortInd], theta[sortInd],
    )

    graph_data["ymax"] = yshift[ymax]
    graph_data["xmax"] = xshift[xmax]

    meanDist = peakDist.mean()
    X, Y = np.meshgrid(xshift, yshift)
    distMat = np.sqrt(X**2 + Y**2) / pixel

    _results["First Check"] = (len(ymax) == np.logical_and(
        peakDist > 0.75 * meanDist, peakDist < 1.25 * meanDist).sum())

    maskInd = np.logical_and(
        distMat > 0.5 * meanDist, distMat < 1.5 * meanDist)
    rotCorr = np.array(
        [corr_coeff(rot_2d(corrMap, angle)[maskInd], corrMap[maskInd])
         for angle in bins])

    ramax, rimax, ramin, rimin = extrema(rotCorr)
    mThetaPk, mThetaTr = (
        (np.diff(bins[rimax]).mean(), np.diff(bins[rimin]).mean())
        if rimax.size and rimin.size else (None, None))
    graph_data["rimax"] = rimax
    graph_data["rimin"] = rimin
    graph_data["anglemax"] = bins[rimax]
    graph_data["anglemin"] = bins[rimin]
    graph_data["rotAngle"] = bins
    graph_data["rotCorr"] = rotCorr

    if mThetaPk is not None and mThetaTr is not None:
        isGrid = (60 - tol < mThetaPk < 60 + tol
                  and 60 - tol < mThetaTr < 60 + tol)
    else:
        isGrid = False

    meanAlpha = np.diff(theta).mean()
    psi = theta[np.array([2, 3, 4, 5, 0, 1])] - theta
    psi[psi < 0] += 360
    meanPsi = psi.mean()

    _results["Is Grid"] = (isGrid and 120 - tol < meanPsi < 120 + tol
                           and 60 - tol < meanAlpha < 60 + tol)
    _results["Grid Mean Alpha"] = meanAlpha
    _results["Grid Mean Psi"] = meanPsi
    _results["Grid Spacing"] = meanDist * pixel
    # Difference between highest Pearson R at peaks and lowest at troughs
    _results["Grid Score"] = rotCorr[rimax].max() - rotCorr[rimin].min()
    _results["Grid Orientation"] = theta[0]

    self.update_result(_results)
    return graph_data
def plv(self, event_stamp, **kwargs):
    """
    Calculate the phase-locking value of the spike train to the underlying LFP signal.

    When 'mode' is None in the input kwargs, it calculates the PLV and
    SFC over the entire spike train.

    If 'mode' is 'bs', it bootstraps the spike timestamps and calculates
    the locking values for each set of new spike timestamps.

    If 'mode' is 'tr', a time-resolved phase-locking analysis is
    performed, where the LFP signal is split into overlapping segments
    for each calculation.

    Parameters
    ----------
    event_stamp : ndarray
        Timestamps of the events or the spiking activities for measuring
        the phase locking
    **kwargs
        Keyword arguments

    Returns
    -------
    dict
        Graphical data of the analysis

    """
    graph_data = oDict()

    lfp = self.get_samples() * 1000
    Fs = self.get_sampling_rate()
    time = self.get_timestamp()

    window = np.array(kwargs.get('window', [-0.5, 0.5]))
    win = np.ceil(window * Fs).astype(int)
    win = np.arange(win[0], win[1])
    slep_win = sg.hann(win.size, False)

    nfft = kwargs.get('nfft', 1024)
    mode = kwargs.get('mode', None)  # None, 'bs', 'tr'; bs=bootstrap, tr=time-resolved
    fwin = kwargs.get('fwin', [])

    xf = np.arange(0, Fs, Fs / nfft)
    f = xf[0:int(nfft / 2) + 1]
    ind = np.arange(f.size) if len(fwin) == 0 else find(
        np.logical_and(f >= fwin[0], f <= fwin[1]))

    if mode == 'bs':
        nsample = kwargs.get('nsample', 50)
        nrep = kwargs.get('nrep', 500)

        STA = np.empty([nrep, win.size])
        fSTA = np.empty([nrep, ind.size])
        STP = np.empty([nrep, ind.size])
        SFC = np.empty([nrep, ind.size])
        PLV = np.empty([nrep, ind.size])

        # Repeat the analysis on bootstrapped subsets of the spikes
        for i in np.arange(nrep):
            data = self.plv(
                np.random.choice(event_stamp, nsample, False),
                window=window, nfft=nfft, mode=None, fwin=fwin)
            t = data['t']
            STA[i, :] = data['STA']
            fSTA[i, :] = data['fSTA']
            STP[i, :] = data['STP']
            SFC[i, :] = data['SFC']
            PLV[i, :] = data['PLV']

        graph_data['t'] = t
        graph_data['f'] = f[ind]
        graph_data['STAm'] = STA.mean(0)
        graph_data['fSTAm'] = fSTA.mean(0)
        graph_data['STPm'] = STP.mean(0)
        graph_data['SFCm'] = SFC.mean(0)
        graph_data['PLVm'] = PLV.mean(0)

        graph_data['STAe'] = stats.sem(STA, 0)
        graph_data['fSTAe'] = stats.sem(fSTA, 0)
        graph_data['STPe'] = stats.sem(STP, 0)
        graph_data['SFCe'] = stats.sem(SFC, 0)
        graph_data['PLVe'] = stats.sem(PLV, 0)

    elif mode == 'tr':
        nsample = kwargs.get('nsample', None)
        slide = kwargs.get('slide', 25)  # in ms
        slide = slide / 1000  # convert to sec
        offset = np.arange(window[0], window[-1], slide)
        nwin = offset.size

        fSTA = np.empty([nwin, ind.size])
        STP = np.empty([nwin, ind.size])
        SFC = np.empty([nwin, ind.size])
        PLV = np.empty([nwin, ind.size])

        if nsample is None or nsample > event_stamp.size:
            stamp = event_stamp
        else:
            stamp = np.random.choice(event_stamp, nsample, False)

        # Slide the event timestamps across the window
        for i in np.arange(nwin):
            data = self.plv(
                stamp + offset[i],
                nfft=nfft, mode=None, fwin=fwin, window=window)
            t = data['t']
            fSTA[i, :] = data['fSTA']
            STP[i, :] = data['STP']
            SFC[i, :] = data['SFC']
            PLV[i, :] = data['PLV']

        graph_data['offset'] = offset
        graph_data['f'] = f[ind]
        graph_data['fSTA'] = fSTA.transpose()
        graph_data['STP'] = STP.transpose()
        graph_data['SFC'] = SFC.transpose()
        graph_data['PLV'] = PLV.transpose()

    elif mode is None:
        center = time.searchsorted(event_stamp)
        # Keep windows within data
        center = np.array([
            center[i] for i in range(0, len(event_stamp))
            if center[i] + win[0] >= 0 and center[i] + win[-1] <= time.size])

        sta_data = self.event_trig_average(event_stamp, **kwargs)
        STA = sta_data['ETA']

        # Power spectrum of the spike-triggered average
        fSTA = fft(np.multiply(STA, slep_win), nfft)
        fSTA = np.absolute(fSTA[0:int(nfft / 2) + 1])**2 / nfft**2
        fSTA[1:-1] = 2 * fSTA[1:-1]

        # Mean power spectrum of the individual LFP segments
        fLFP = np.array([fft(np.multiply(lfp[x + win], slep_win), nfft)
                         for x in center])
        STP = np.absolute(fLFP[:, 0:int(nfft / 2) + 1])**2 / nfft**2
        STP[:, 1:-1] = 2 * STP[:, 1:-1]
        STP = STP.mean(0)

        # Spike-field coherence: STA power relative to total LFP power
        SFC = np.divide(fSTA, STP) * 100

        PLV = np.copy(fLFP)
        # Normalize each segment to unit amplitude, keeping only phase
        PLV = np.divide(PLV, np.absolute(PLV))
        PLV[np.isnan(PLV)] = 0
        PLV = np.absolute(PLV.mean(0))[0:int(nfft / 2) + 1]
        PLV[1:-1] = 2 * PLV[1:-1]

        graph_data['t'] = sta_data['t']
        graph_data['f'] = f[ind]
        graph_data['STA'] = STA
        graph_data['fSTA'] = fSTA[ind]
        graph_data['STP'] = STP[ind]
        graph_data['SFC'] = SFC[ind]
        graph_data['PLV'] = PLV[ind]

    return graph_data
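# A usage sketch (hypothetical: `lfp_obj` is an instance of this class
# with an LFP record loaded, and `spike_times` is an ndarray of spike
# timestamps in seconds):
#     data = lfp_obj.plv(spike_times, window=[-0.5, 0.5], nfft=1024,
#                        mode='bs', nsample=50, nrep=500, fwin=[6, 12])
#     # data['PLVm'] and data['PLVe'] hold the bootstrap mean and SEM of
#     # the phase-locking value across the 6-12 Hz band.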
def phase_dist(self, event_stamp, **kwargs):
    """
    Analysis of the spike to LFP phase distribution.

    Parameters
    ----------
    event_stamp : ndarray
        Timestamps of the events or spiking activities for measuring
        the phase distribution
    **kwargs
        Keyword arguments

    Returns
    -------
    dict
        Graphical data of the analysis

    """
    _results = oDict()
    graph_data = oDict()

    cs = CircStat()

    lfp = self.get_samples() * 1000
    Fs = self.get_sampling_rate()
    time = self.get_timestamp()

    # Input parameters
    bins = int(360 / kwargs.get('binsize', 5))
    rbinsize = kwargs.get('rbinsize', 2)  # raster binsize
    rbins = int(360 / rbinsize)
    fwin = kwargs.get('fwin', [6, 12])
    pratio = kwargs.get('pratio', 0.2)
    aratio = kwargs.get('aratio', 0.15)

    # Filter
    fmax = fwin[1]
    fmin = fwin[0]
    _filter = [5, fmin, fmax, 'bandpass']
    _prefilt = kwargs.get('filtset', [10, 1.5, 40, 'bandpass'])

    b_lfp = butter_filter(lfp, Fs, *_filter)  # band LFP
    lfp = butter_filter(lfp, Fs, *_prefilt)

    # Measure phase from the analytic signal of the band-filtered LFP
    hilb = sg.hilbert(b_lfp)
    phase = np.angle(hilb, deg=True)
    phase[phase < 0] = phase[phase < 0] + 360
    mag = np.abs(hilb)

    ephase = np.interp(event_stamp, time, phase)

    p2p = np.abs(np.max(lfp) - np.min(lfp))
    xline = 0.5 * np.mean(mag)  # cross line

    # Detection algorithm: upward crossings of the envelope
    # through the cross line
    mag1 = mag[0:-3]
    mag2 = mag[1:-2]
    mag3 = mag[2:-1]
    xind = np.union1d(
        find(np.logical_and(mag1 < xline, mag2 > xline)),
        find(np.logical_and(
            np.logical_and(mag1 < xline, mag2 == xline), mag3 > xline)))

    # Ignore segments shorter than 1/fmin
    i = 0
    rcount = np.empty([0, ])
    bcount = np.empty([0, 0])

    phBins = np.arange(0, 360, 360 / bins)
    rbins = np.arange(0, 360, 360 / rbins)

    seg_count = 0
    while i < len(xind) - 1:
        k = i + 1
        while (time[xind[k]] - time[xind[i]] < 1 / fmin
               and k < len(xind) - 1):
            k += 1

        s_lfp = lfp[xind[i]:xind[k]]
        s_p2p = np.abs(np.max(s_lfp) - np.min(s_lfp))

        # Keep segments with sufficient amplitude and in-band power
        if s_p2p >= aratio * p2p:
            s_psd, f = fft_psd(s_lfp, Fs)
            if np.sum(s_psd[np.logical_and(f >= fmin, f <= fmax)]) \
                    > pratio * np.sum(s_psd):
                # Phase distribution of the spikes within this segment
                s_phase = ephase[np.logical_and(
                    event_stamp > time[xind[i]],
                    event_stamp <= time[xind[k]])]

                if not s_phase.shape[0]:
                    pass
                else:
                    seg_count += 1
                    cs.set_theta(s_phase)
                    temp_count = cs.circ_histogram(bins=rbinsize)
                    temp_count = temp_count[0]
                    if not rcount.size:
                        rcount = temp_count
                    else:
                        rcount = np.append(rcount, temp_count)

                    temp_count = np.histogram(
                        s_phase, bins=bins, range=[0, 360])
                    temp_count = np.resize(temp_count[0], [1, bins])

                    if not len(bcount):
                        bcount = temp_count
                    else:
                        bcount = np.append(bcount, temp_count, axis=0)
        i = k

    rcount = rcount.reshape([seg_count, rbins.size])

    phCount = np.sum(bcount, axis=0)

    cs.set_rho(phCount)
    cs.set_theta(phBins)

    cs.calc_stat()
    result = cs.get_result()
    meanTheta = result['meanTheta'] * np.pi / 180

    _results['LFP Spike Mean Phase'] = result['meanTheta']
    _results['LFP Spike Mean Phase Count'] = result['meanRho']
    _results['LFP Spike Phase Res Vect'] = result['resultant']

    graph_data['meanTheta'] = meanTheta
    graph_data['phCount'] = phCount
    graph_data['phBins'] = phBins
    graph_data['raster'] = rcount
    graph_data['rasterbins'] = rbins

    self.update_result(_results)
    return graph_data
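# A usage sketch (hypothetical `lfp_obj` instance and spike timestamps):
#     data = lfp_obj.phase_dist(spike_times, binsize=5, rbinsize=2,
#                               fwin=[6, 12], pratio=0.2, aratio=0.15)
#     # data['phBins'] and data['phCount'] give the spike-phase histogram;
#     # a strongly theta-locked unit shows a clear peak near its mean phase.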
def spectrum(self, **kwargs):
    """
    Analyse the frequency spectrum of the LFP signal.

    Parameters
    ----------
    **kwargs
        Keyword arguments

    Returns
    -------
    dict
        Graphical data of the analysis

    """
    graph_data = oDict()

    Fs = self.get_sampling_rate()
    lfp = self.get_samples()

    window = kwargs.get('window', 1.0)
    window = (sg.get_window('hann', int(window * Fs))
              if isinstance(window, (float, int)) else window)
    win_sec = np.ceil(window.size / Fs)

    noverlap = kwargs.get('noverlap', 0.5 * win_sec)
    noverlap = noverlap if noverlap < win_sec else 0.5 * win_sec
    noverlap = np.ceil(noverlap * Fs)

    nfft = kwargs.get('nfft', 2 * Fs)
    nfft = np.power(2, int(np.ceil(np.log2(nfft))))

    ptype = kwargs.get('ptype', 'psd')
    ptype = 'spectrum' if ptype == 'power' else 'density'

    prefilt = kwargs.get('prefilt', True)
    _filter = kwargs.get('filtset', [10, 1.5, 40, 'bandpass'])

    fmax = kwargs.get('fmax', Fs / 2)

    if prefilt:
        lfp = butter_filter(lfp, Fs, *_filter)

    tr = kwargs.get('tr', False)
    db = kwargs.get('db', False)
    if tr:
        # Time-resolved spectrum (spectrogram)
        f, t, Sxx = sg.spectrogram(
            lfp, fs=Fs, window=window, nperseg=window.size,
            noverlap=noverlap, nfft=nfft, detrend='constant',
            return_onesided=True, scaling=ptype)
        graph_data['t'] = t
        graph_data['f'] = f[find(f <= fmax)]

        if db:
            Sxx = 10 * np.log10(Sxx / np.amax(Sxx))
            Sxx = Sxx.flatten()
            Sxx[find(Sxx < -40)] = -40
            Sxx = np.reshape(Sxx, [f.size, t.size])

        graph_data['Sxx'] = Sxx[find(f <= fmax), :]
    else:
        # Average spectrum (Welch's method)
        f, Pxx = sg.welch(
            lfp, fs=Fs, window=window, nperseg=window.size,
            noverlap=noverlap, nfft=nfft, detrend='constant',
            return_onesided=True, scaling=ptype)
        graph_data['f'] = f[find(f <= fmax)]

        if db:
            Pxx = 10 * np.log10(Pxx / Pxx.max())
            Pxx[find(Pxx < -40)] = -40

        graph_data['Pxx'] = Pxx[find(f <= fmax)]

    return graph_data
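# A usage sketch (hypothetical: `lfp_obj` is an instance of this class
# with an LFP record loaded):
#     psd_data = lfp_obj.spectrum(window=1.0, nfft=1024, ptype='psd',
#                                 prefilt=True, fmax=40, db=False)
#     sgram_data = lfp_obj.spectrum(tr=True, db=True, fmax=40)
#     # psd_data['f'] / psd_data['Pxx'] hold the Welch PSD; sgram_data
#     # adds 't' and 'Sxx' for the spectrogram, clipped at -40 dB.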