def window(f, start, stop, type='blackman'):
    """
    Runs the data through the selected window function (default
    'blackman') over the index range [start, stop); samples outside
    that range are zeroed.

    @param f: The data matrix
    @param start: The start index of the window.
    @param stop: The end index of the window.
    """
    h = numpy.zeros(f.shape, dtype=float)
    if len(h.shape) == 1:
        if type == 'hamming':
            h[start:stop] = signal.hamming(stop - start)
        elif type == 'blackman':
            h[start:stop] = signal.blackman(stop - start)
        elif type == 'hann':
            h[start:stop] = signal.hann(stop - start)
        elif type == 'blackmanharris':
            h[start:stop] = signal.blackmanharris(stop - start)
        elif type == 'rectangular' or type == 'rect' or type == 'boxcar':
            h[start:stop] = signal.boxcar(stop - start)
    else:
        if type == 'hamming':
            h[:, start:stop] = signal.hamming(stop - start)
        elif type == 'blackman':
            h[:, start:stop] = signal.blackman(stop - start)
        elif type == 'hann':
            h[:, start:stop] = signal.hann(stop - start)
        elif type == 'blackmanharris':
            h[:, start:stop] = signal.blackmanharris(stop - start)
        elif type == 'rectangular' or type == 'rect' or type == 'boxcar':
            h[:, start:stop] = signal.boxcar(stop - start)
    return numpy.multiply(f, h)
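A minimal usage sketch for the window() function above, assuming numpy and scipy.signal are imported under the names the function body uses; the test signal and index values are made up:

import numpy
from scipy import signal

t = numpy.linspace(0, 1, 1000)
x = numpy.sin(2 * numpy.pi * 50 * t)        # 1-D test signal
x_win = window(x, 200, 800, type='hann')    # taper samples 200..799, zero the rest
assert numpy.all(x_win[:200] == 0) and numpy.all(x_win[800:] == 0)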
def blur_image(im, n, ny=None, ftype='boxcar'):
    """
    Blurs the image by convolving with a filter ('gaussian' or 'boxcar')
    of size n. The optional keyword argument ny allows for a different
    size in the y direction.
    """
    n = int(n)
    if not ny:
        ny = n
    else:
        ny = int(ny)
    # keep track of nans
    nan_idx = np.isnan(im)
    im[nan_idx] = 0
    if ftype == 'boxcar':
        if np.ndim(im) == 1:
            g = boxcar(n) / float(n)
        elif np.ndim(im) == 2:
            # relies on boxcar(M) returning np.ones(M), so a 2-element
            # size yields a 2-D kernel
            g = boxcar([n, ny]) / float(n)
    elif ftype == 'gaussian':
        x, y = np.mgrid[-n:n + 1, -ny:ny + 1]
        g = np.exp(-(x**2 / float(n) + y**2 / float(ny)))
        if np.ndim(im) == 1:
            g = g[n, :]
    g = g / g.sum()  # normalise the kernel
    improc = convolve(im, g, mode='same')
    improc[nan_idx] = np.nan
    return improc
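A short usage sketch for blur_image(), assuming the bare names np, boxcar and convolve it references are bound as below; the image is random test data:

import numpy as np
from scipy.signal import boxcar, convolve

img = np.random.rand(64, 64)
img[10, 10] = np.nan                    # blur_image zero-fills NaNs, then restores them
blurred = blur_image(img, 5, ftype='boxcar')
print(blurred.shape, np.isnan(blurred[10, 10]))   # (64, 64) True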
def psd(self, **kwargs):
    """
    Calculate the one-sided non-windowed power spectrum of the light
    curve. This uses the :func:`matplotlib.mlab.psd` function for
    computing the power spectrum, with a single non-overlapping FFT.

    Parameters
    ----------
    column : str, optional
       The column of the lightcurve which should be analysed.

    Returns
    -------
    sk : array-like
       The Power spectral density of the light curve.
    f : array-like
       An array of the frequencies.
    """
    data = self.data
    if isinstance(data, pd.core.frame.DataFrame):
        # If the supplied data is a pandas DataFrame we'll need to
        # decide if we're working on just one column, or all of them.
        if "column" in kwargs:
            column = kwargs["column"]
            dataw = np.array(data[column])
            dataw = self.nan_interp(dataw)
            l = len(dataw)
            sk, f = ml.psd(x=dataw, window=signal.boxcar(l), noverlap=0,
                           NFFT=l, Fs=self.fs(), sides='onesided')
        else:
            sk = {}
            f = {}
            for column in data.columns.values.tolist():
                dataw = np.array(data[column])
                dataw = self.nan_interp(dataw)
                l = len(dataw)
                sk[column], f[column] = ml.psd(x=dataw,
                                               window=signal.boxcar(l),
                                               noverlap=0, NFFT=l,
                                               Fs=self.fs(),
                                               sides='onesided')
    # return power spectral density and array of frequencies
    return sk, f
def blurImage(im, n, ny=None, ftype='boxcar'):
    """
    Smooths a 2D image by convolving with a filter

    Parameters
    ----------
    im : array_like
        The array to smooth
    n, ny : int
        The size of the smoothing kernel
    ftype : str
        The type of smoothing kernel. Either 'boxcar' or 'gaussian'

    Returns
    -------
    res : array_like
        The smoothed vector with shape the same as im
    """
    from scipy import signal
    n = int(n)
    if not ny:
        ny = n
    else:
        ny = int(ny)
    # keep track of nans
    nan_idx = np.isnan(im)
    im[nan_idx] = 0
    g = signal.boxcar(n) / float(n)
    if 'box' in ftype:
        if im.ndim == 1:
            g = signal.boxcar(n) / float(n)
        elif im.ndim == 2:
            g = signal.boxcar(n) / float(n)
            g = np.tile(g, (1, ny, 1))
            g = g / g.sum()
            g = np.squeeze(g)  # extra dim introduced in np.tile above
        elif im.ndim == 3:  # multidimensional binning
            g = signal.boxcar(n) / float(n)
            g = np.tile(g, (1, ny, 1))
            g = g / g.sum()
    elif 'gaussian' in ftype:
        x, y = np.mgrid[-n:n + 1, 0 - ny:ny + 1]
        g = np.exp(-(x**2 / float(n) + y**2 / float(ny)))
        g = g / g.sum()
        if np.ndim(im) == 1:
            g = g[n, :]
        if np.ndim(im) == 3:
            g = np.tile(g, (1, ny, 1))
    improc = signal.convolve(im, g, mode='same')
    improc[nan_idx] = np.nan
    return improc
def plot_spec(root='star', root2='star5', wmin=850, wmax=1850, smooth=21):
    '''
    Multi-panel plot comparing a CMFGen and Python model in the UV
    '''
    star = ascii.read(root + '.spec')
    star['nuFnu'] = star['Lambda'] * star['A45P0.50']
    star_nufnu = convolve(star['nuFnu'], boxcar(smooth) / float(smooth),
                          mode='same')

    star2 = ascii.read(root2 + '.spec')
    star2['nuFnu'] = star2['Lambda'] * star2['A45P0.50']
    star2_nufnu = convolve(star2['nuFnu'], boxcar(smooth) / float(smooth),
                           mode='same')

    plt.figure(1, (8, 12))

    plt.subplot(311)
    plt.plot(star['Lambda'], star_nufnu, label=root)
    plt.plot(star2['Lambda'], star2_nufnu, label=root2)
    plt.legend(loc='best')
    plt.xlim(850, 1200)
    # plt.ylabel(r'$\nu F_{\nu}$ (ergs cm$^{-1}$s$^{-1}$)')
    # plt.xlabel(r'Wavelength ($\AA$)')
    add_lines()

    plt.subplot(312)
    plt.plot(star['Lambda'], star_nufnu, label=root)
    plt.plot(star2['Lambda'], star2_nufnu, label=root2)
    plt.legend(loc='best')
    plt.xlim(1150, 1500)
    plt.ylabel(r'$\nu F_{\nu}$ (ergs cm$^{-1}$s$^{-1}$)', size=16)
    # plt.xlabel(r'Wavelength ($\AA$)')
    add_lines()

    plt.subplot(313)
    plt.plot(star['Lambda'], star_nufnu, label=root)
    plt.plot(star2['Lambda'], star2_nufnu, label=root2)
    plt.legend(loc='best')
    plt.xlim(1450, 1800)
    # plt.ylabel(r'$\nu F_{\nu}$ (ergs cm$^{-1}$s$^{-1}$)')
    plt.xlabel(r'Wavelength ($\AA$)', size=16)
    add_lines()

    plt.savefig('%s_%s.png' % (root, root2))
    return
def boxfilter(self, cutoff):
    """Filter the data using a boxcar filter and store the values in
    filteredseries"""
    for i in range(len(self.recarray[0])):
        fil = signal.boxcar(cutoff)
        output = signal.convolve(self.recarray[:, i] / cutoff, fil,
                                 mode='same')
        self.filteredseries[:, i] = output
    return self.filteredseries
def acf(self, pmin=0.1, pmax=100, filter=True, smooth=None):
    """Filters with pmax = pmax, then returns ACF up to lag=2*pmax
    """
    if filter:
        if self._x_full is None:
            self._get_data()
        x, y, yerr = bandpass_filter(self._x_full, self._y_full,
                                     self._yerr_full, zero_fill=True,
                                     pmin=pmin, pmax=pmax)
    else:
        x, y = self.x, self.y

    lags, ac = acf(x, y, maxlag=2 * pmax)

    if smooth is not None:
        cadence = np.median(np.diff(lags))
        Nbox = int(smooth / cadence)  # boxcar length must be an integer
        if Nbox >= 3:
            ac = convolve(ac, boxcar(Nbox) / float(Nbox), mode='reflect')

    return lags, ac
def filter_csd(self):
    '''Spatial filtering of the CSD estimate, using an N-point filter'''
    # the gaussian filter takes a 2-tuple f_order, so only validate the
    # order for the scalar filter types (the original check was inverted)
    if self.f_type != 'gaussian':
        if not (self.f_order > 0 and isinstance(self.f_order, int)):
            raise Exception('Filter order must be int > 0!')

    if self.f_type == 'boxcar':
        num = ss.boxcar(self.f_order)
        denom = pl.array([num.sum()])
    elif self.f_type == 'hamming':
        num = ss.hamming(self.f_order)
        denom = pl.array([num.sum()])
    elif self.f_type == 'triangular':
        num = ss.triang(self.f_order)
        denom = pl.array([num.sum()])
    elif self.f_type == 'gaussian':
        num = ss.gaussian(self.f_order[0], self.f_order[1])
        denom = pl.array([num.sum()])
    else:
        raise Exception('%s Wrong filter type!' % self.f_type)

    num_string = '[ '
    for i in num:
        num_string = num_string + '%.3f ' % i
    num_string = num_string + ']'
    denom_string = '[ '
    for i in denom:
        denom_string = denom_string + '%.3f ' % i
    denom_string = denom_string + ']'

    print('discrete filter coefficients: \nb = %s, \na = %s' %
          (num_string, denom_string))

    self.csd_filtered = pl.empty(self.csd.shape)
    for i in range(self.csd.shape[1]):
        self.csd_filtered[:, i] = ss.filtfilt(num, denom, self.csd[:, i])
def get_audio_from_frame(frame, win=signal.boxcar(160), classes=256):
    ## Convert frame to audio
    audio_vec = frame_to_audio(frame, win)

    ## Convert Normalized audio back to un-Normalized audio
    # gen_audio = generate_audio(audio_vec, classes=classes)
    gen_audio = audio_vec
    return gen_audio
def psd(self):
    """
    Calculate the one-sided non-windowed power spectrum of the light
    curve. This uses the :func:`matplotlib.mlab.psd` function for
    computing the power spectrum, with a single non-overlapping FFT.

    Returns
    -------
    sk : array-like
       The Power spectral density of the light curve.
    f : array-like
       An array of the frequencies.
    """
    l = len(self.clc)

    # get the power spectrum of the lightcurve data
    sk, f = ml.psd(x=self.clc, window=signal.boxcar(l), noverlap=0,
                   NFFT=l, Fs=self.fs(), sides='onesided')

    # return power spectral density and array of frequencies
    return sk, f
def generate_window(self, wnd_size, front_size=3):
    # wnd = np.hstack((np.zeros(wnd_size), np.ones(wnd_size)))
    wnd = signal.hann(2 * front_size)
    wnd = np.insert(wnd, front_size, signal.boxcar(wnd_size))
    wnd = np.insert(np.zeros(2 * front_size), front_size, wnd)
    return wnd
def timbral_fe_rectangular_notopdb(self):
    """
    Extracts mfcc and its delta feature from input audio;
    a rectangular window (i.e. no window) is used.

    Args:
        fmin: Minimum frequency of Mel-filter bank, defaults to 0
        fmax: Maximum frequency of Mel-filter bank, defaults to
            sampling rate / 2
    """
    # MFCC extraction
    power = 2
    S = np.abs(
        librosa.core.stft(y=self.audio_buffer, n_fft=self.n_fft,
                          hop_length=self.n_hop, win_length=self.win_len,
                          window=signal.boxcar(self.n_fft, sym=False)))**power

    # Mel-filter bank
    mel_basis = librosa.filters.mel(sr=self.sampling_rate, n_fft=self.n_fft,
                                    n_mels=self.n_mel)
    S = np.dot(mel_basis, S)
    S = librosa.core.logamplitude(S, top_db=None)
    Y_mfcc_coeff = np.dot(
        librosa.filters.dct(n_filters=self.n_mfcc, n_input=S.shape[0]), S)
    Y_mfcc_coeff_transpose = np.transpose(Y_mfcc_coeff)

    # delta mfcc features
    mfcc_delta = librosa.feature.delta(Y_mfcc_coeff)

    return (Y_mfcc_coeff_transpose, mfcc_delta.T)
def main(i):
    # globalize the current reading and previous data.
    global time
    global orig_signal
    reading = str(ser.readline().decode('utf-8'))

    # calculate and plot original EMG scaled signal
    if "\n" in reading:
        orig_signal += [float(reading[reading.find("g:") + 3:
                                      reading.find("V")])]
        time += [int(reading[reading.find("t:") + 3:
                             reading.find("micro")])]
        plot_signal(fig1, time, orig_signal, 'Original EMG Scaled Data')

        # calculate and plot hi-low pass signal
        b_high, a_high = signal.butter(3, 0.1, 'highpass', analog=False)
        high_pass_signal = my_filter(b_high, a_high, orig_signal)
        b_low, a_low = signal.butter(3, .5, 'lowpass', analog=False)
        hilo_pass_signal = my_filter(b_low, a_low, high_pass_signal)
        plot_signal(fig2, time, hilo_pass_signal,
                    'High-Low Pass EMG Scaled Data')

        # calculate and plot rectified signal
        rectified_signal = []
        for x in hilo_pass_signal:
            rectified_signal += [abs(x)]
        plot_signal(fig3, time, rectified_signal,
                    'Rectified EMG Scaled Data')

        # calculate and plot smoothed signal
        box = signal.boxcar(100)
        smoothed_signal = signal.lfilter(box, 1, rectified_signal)
        plot_signal(fig4, time, smoothed_signal, 'Smoothed EMG Scaled Data')

        # calculate and plot the power spectral density
        frequency, power = signal.welch(hilo_pass_signal, 200)
        plot_signal(fig5, frequency, power, 'Power Spectral Density',
                    'Frequency(Hz)', 'log(Power/Hz)')
def fft_spectrum(self, y, fs=1, nl=None, o=0.75, win='hann',
                 nAverage=np.inf):
    n = len(y)
    if nl is None:
        nl = n
    i = 0.
    m = 0
    done = False
    f = np.arange(0, nl) / nl * fs
    Y = np.zeros(nl)
    if win == 'rectangle':
        w = sig.boxcar(nl)
    elif win == 'flattop':
        w = sig.flattop(nl)
    else:
        w = sig.hann(nl)
    while not done:
        a = int(np.floor(i * nl))
        b = int(a + nl)
        Y = np.abs(fft(y[a:b] * w / np.sum(w))) + Y
        i = i + 1 * (1 - o)
        m += 1
        done = b > (n - nl * (1 - o)) or m == nAverage
    Y = Y / m * 2
    return (Y, f, m)
def kconvol(array, kernel, scale_factor=1, center=0):
    # Partial port of IDL's kconvol; only the 1-D branch does real work.
    # The original indexed a 1-D array with shape[1], called kernel.size()
    # as a method and used a two-argument boxcar(); those are fixed below.
    if array.ndim == 1:
        if np.ndim(kernel) == 0:  # a scalar kernel is treated as a boxcar width
            wx = int(kernel)
        else:
            wx = kernel.shape[0]
        border = wx * 2
        eg1 = array.shape[0] + wx - 1
        sa = np.zeros(border + array.shape[0])
        sa[wx:eg1] = array
        # a = np.rot90(np.transpose(array))  # unused in the original
        if np.size(kernel) == 1:
            # boxcar-smooth with a window of width wx
            sa = np.convolve(sa, np.ones(wx) / wx, mode='same')
        else:
            # check this...
            sa = np.convolve(sa, kernel, mode='same')
        sa = sa[wx:eg1]
        return sa
    elif array.ndim == 2:
        # The 2-D branch was never finished: it only works out the kernel
        # widths and falls through without convolving.
        if np.ndim(kernel) == 0:
            wx = int(kernel)
            wy = int(kernel)
        else:
            wx = kernel.shape[1]
    else:
        raise ValueError('The current version of this code only supports '
                         'up to 2 dimensions.')
def create_boxcar(raw, event_id=None, stim_dur=1):
    """
    Generate boxcar representation of the experimental paradigm.

    Parameters
    ----------
    raw : instance of Raw
        Haemoglobin data.
    event_id : as specified in MNE
        Information about events.
    stim_dur : Number
        The length of your stimulus.

    Returns
    -------
    s : array
        Returns an array for each annotation label.
    """
    from scipy import signal
    bc = signal.boxcar(round(raw.info['sfreq'] * stim_dur))
    events, ids = mne.events_from_annotations(raw, event_id=event_id)
    s = np.zeros((len(raw.times), len(ids)))
    for idx, id in enumerate(ids):
        id_idx = [e[2] == idx + 1 for e in events]
        id_evt = events[id_idx]
        event_samples = [e[0] for e in id_evt]
        s[event_samples, idx] = 1.
        s[:, idx] = np.convolve(s[:, idx], bc)[:len(raw.times)]
    return s
def raw_ctd_filter(df=None, window="triangle", win_size=24, parameters=None):
    """
    Filter raw CTD data using one of three window types (boxcar, hanning,
    triangle).

    Parameters
    ----------
    df : DataFrame
        Raw CTD data
    window : str, optional
        Type of filter window
    win_size : int, optional
        Length of window in number of samples
    parameters : list of str, optional
        List of DataFrame columns to be filtered

    Returns
    -------
    filter_df : DataFrame
        CTD data with filtered parameters
    """
    filter_df = df.copy()
    if parameters is not None:
        for p in parameters:
            if window == "boxcar":
                win = sig.boxcar(win_size)
            elif window == "hanning":
                win = sig.hann(win_size)
            elif window == "triangle":
                win = sig.triang(win_size)
            filter_df[p] = sig.convolve(filter_df[p], win,
                                        mode="same") / np.sum(win)
    return filter_df
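A quick usage sketch for raw_ctd_filter(), assuming the sig/np/pd names the snippet references; the column names and values are made-up test data:

import numpy as np
import pandas as pd
from scipy import signal as sig

ctd = pd.DataFrame({'CTDTMP': 20 + 0.05 * np.random.randn(500),
                    'CTDSAL': 35 + 0.01 * np.random.randn(500)})
smoothed = raw_ctd_filter(ctd, window='boxcar', win_size=24,
                          parameters=['CTDTMP', 'CTDSAL'])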
def create_boxcar(raw, event_id=None, stim_dur=5): """ Create a boxcar of the experiment. .. warning:: The naming of this function may change. Use with caution. This is just a place holder while I get the documentation\ up and running. Parameters ---------- raw : instance of Raw Haemoglobin data. event_id : as specified in MNE Information about events. stim_dur : Number The length of your stimulus. Returns ------- s : array Returns an array for each trigger channel. """ from scipy import signal bc = signal.boxcar(round(raw.info['sfreq'] * stim_dur)) events, ids = mne.events_from_annotations(raw, event_id=event_id) s = np.zeros((len(raw.times), len(ids))) for idx, id in enumerate(ids): id_idx = [e[2] == idx + 1 for e in events] id_evt = events[id_idx] event_samples = [e[0] for e in id_evt] s[event_samples, idx] = 1. s[:, idx] = np.convolve(s[:, idx], bc)[:len(raw.times)] return s
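The core of both create_boxcar variants, placing unit impulses at event onsets and convolving with a boxcar of the stimulus duration, can be sketched without MNE; sfreq, the onset samples and the duration below are made-up values:

import numpy as np
from scipy import signal

sfreq = 10.0                      # samples per second (made up)
n_times = 300
stim_dur = 5                      # seconds
onsets = [20, 120, 220]           # event onsets in samples (made up)

bc = signal.boxcar(round(sfreq * stim_dur))
s = np.zeros(n_times)
s[onsets] = 1.
s = np.convolve(s, bc)[:n_times]  # each impulse becomes a 50-sample block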
def Rectangular(N, x):
    # 'ventana' is the window, 'salida' the windowed output
    ventana = signal.boxcar(N)
    salida = np.multiply(x, ventana)
    return salida
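Because the boxcar window is identically one, Rectangular() returns its input unchanged; a quick check, assuming the np/signal imports the snippet uses:

import numpy as np
from scipy import signal

x = np.random.randn(64)
y = Rectangular(64, x)   # all-ones window, so this is a no-op
assert np.allclose(x, y)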
def plot_specgram(ax, data, fs, nfft=256, noverlap=128, window='hann',
                  cmap='jet', interpolation='bilinear', rasterized=True):
    if window not in SPECGRAM_WINDOWS:
        raise ValueError("Window not supported")
    elif window == "boxcar":
        mwindow = signal.boxcar(nfft)
    elif window == "hamming":
        mwindow = signal.hamming(nfft)
    elif window == "hann":
        mwindow = signal.hann(nfft)
    elif window == "bartlett":
        mwindow = signal.bartlett(nfft)
    elif window == "blackman":
        mwindow = signal.blackman(nfft)
    elif window == "blackmanharris":
        mwindow = signal.blackmanharris(nfft)

    specgram, freqs, time = mlab.specgram(data, NFFT=nfft, Fs=fs,
                                          window=mwindow,
                                          noverlap=noverlap)
    specgram = 10 * np.log10(specgram[1:, :])
    specgram = np.flipud(specgram)

    freqs = freqs[1:]
    halfbin_time = (time[1] - time[0]) / 2.0
    halfbin_freq = (freqs[1] - freqs[0]) / 2.0
    extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
              freqs[0] - halfbin_freq, freqs[-1] + halfbin_freq)

    ax.imshow(specgram, cmap=cmap, interpolation=interpolation,
              extent=extent, rasterized=rasterized)
    ax.axis('tight')
def enframe(self, datas, fs, frame_len, frame_inc, win):
    '''
    ' datas: speech samples
    ' fs: sampling frequency
    ' frame_len: frame length, in seconds
    ' frame_inc: frame shift, in seconds
    ' win: window function
    '''
    datas_len = len(datas)  # total number of samples
    frame_len = int(round(frame_len * fs))  # frame length in samples
    nstep = frame_len - int(round(frame_inc * fs))  # frame step in samples
    if datas_len < frame_len:
        # with less than one frame of data, use a single frame
        nf = 1
    else:
        nf = int(np.ceil((1.0 * datas_len - frame_len) / nstep)) + 1
    pad_len = int((nf - 1) * nstep + frame_len)  # total length covered by all frames
    # zero-pad the trailing samples
    new_datas = np.concatenate((datas, np.zeros(pad_len - datas_len)))
    indices = np.tile(np.arange(0, frame_len), (nf, 1)) + \
        np.tile(np.arange(0, nf * nstep, nstep), (frame_len, 1)).T
    indices = np.array(indices, dtype=np.int32)  # cast, or indexing raises a type error
    frames = new_datas[indices]  # the framed signal
    # apply the window
    if win == 'hamming':
        win = signal.hamming(frame_len)
    elif win == 'hanning':
        win = signal.hanning(frame_len)
    else:
        win = signal.boxcar(frame_len)
    return frames * np.tile(win, (nf, 1))
def blakmanTukey(signal, M=0, win="Bartlett", n1=0, n2=0, ax=0):
    if n1 == 0 and n2 == 0:  # by default, use the whole signal
        n1 = 0
        n2 = len(signal)
    N = n2 - n1
    if M == 0:
        M = int(N / 5)
    M = 2 * M - 1
    if M > N:
        raise ValueError('Window cannot be longer than data')
    if win == "Bartlett":
        w = np.bartlett(M)
    elif win == "Hanning":
        w = np.hanning(M)
    elif win == "Hamming":
        w = np.hamming(M)
    elif win == "Blackman":
        w = np.blackman(M)
    elif win == "Flattop":
        w = sg.flattop(M)
    else:
        w = sg.boxcar(M)
    r, lags = acorrBiased(signal)
    r = r[np.logical_and(lags >= 0, lags < M)]
    rw = r * w
    Px = 2 * fft(rw).real - rw[0]
    return Px
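A minimal sketch of the acorrBiased() helper the snippet above assumes: the biased autocorrelation estimate r[k] = (1/N) * sum_n x[n] x[n+k], returned with its lag axis. The name and return order are inferred from how blakmanTukey() uses it:

import numpy as np

def acorrBiased(x):
    x = np.asarray(x, dtype=float)
    N = len(x)
    r = np.correlate(x, x, mode='full') / N  # biased: divide by N, not N-|k|
    lags = np.arange(-(N - 1), N)
    return r, lags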
def graficarDienteDeSierra():
    fm = 20_000
    f0 = 138
    t = linspace(0, 1, fm)
    dientes = sawtooth(2 * pi * f0 * t)
    tv = int((fm / f0))
    param = 2
    figureLabel = ""
    if param == 0:
        window = boxcar(int(2**(ceil(log2(2 * tv)))))
        overlap = 0
        figureLabel = "Diente de Sierra-Window=Boxcar"
    if param == 1:
        window = get_window("hann", int(2**(ceil(log2(4 * tv)))))
        overlap = int(len(window) / 2)
        figureLabel = "Diente de Sierra-Window=Hann"
    if param == 2:
        window = get_window("hamming", int(2**(ceil(log2(4 * tv)))))
        overlap = int(len(window) / 2)
        figureLabel = "Diente de Sierra-Window=Hamming"
    print(len(window))
    fftRst = stft(dientes, window, fm, overlap)
    fftRst /= np.max(fftRst)  # normalize the signal
    print(f"f_0={fftRst[138]}, 2f_0={fftRst[276]}, 3f_0={fftRst[414]}, "
          f"4f_0={fftRst[552]}")
    figure(figureLabel)
    plot(fftRst)
    xlabel("Freq (Hz)")
    ylabel("Magnitude")
    show()
    return 0
def boxcar_smooth(x, winsize, passes=2):
    """
    Smooth a data array with a sliding boxcar filter

    Parameters
    ----------
    x : ndarray
        data to be smoothed
    winsize : int
        size of boxcar window
    passes : int
        number of passes (default=2)

    Returns
    -------
    xsmooth : ndarray
        smoothed data array, same size as input 'x'
    """
    win = sig.boxcar(winsize) / winsize
    xsmooth = np.pad(x, (winsize, winsize), mode='reflect')
    for i in range(passes):
        xsmooth = sig.convolve(xsmooth, win, mode='same')
    xsmooth = xsmooth[winsize:-winsize]
    return xsmooth
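A short usage sketch for boxcar_smooth(), assuming the np/sig names it references; the noisy sine is test data:

import numpy as np
from scipy import signal as sig

x = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.3 * np.random.randn(200)
xs = boxcar_smooth(x, winsize=11, passes=2)
assert xs.shape == x.shape  # reflect-padding keeps the length unchanged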
def smooth_boxcar(data, selected_columns, winsize):
    """Boxcar smoothing of data

    Parameters
    ----------
    data: dataframe
    selected_columns: list of keys, stating which columns will be smoothed
    winsize: number of samples of rectangle window

    Return
    ------
    smoothed: dataFrame
    """
    logger.info("Boxcar smoothing with winsize %d", winsize)
    smoothed = data.copy(deep=True)
    for col_header in selected_columns:
        # note: .as_matrix() is removed in recent pandas; .to_numpy() is
        # the modern equivalent
        column = smoothed[col_header].as_matrix()
        # padding data:
        # when winsize is even, int(winsize/2) is bigger than
        # int((winsize-1)/2) by 1;
        # when winsize is odd, int(winsize/2) is the same as int((winsize-1)/2)
        pad_head = [column[0]] * int((winsize - 1) / 2)
        pad_tail = [column[-1]] * int(winsize / 2)
        signal = np.r_[pad_head, column, pad_tail]
        window = boxcar(winsize)
        smoothed[col_header] = np.convolve(
            window / window.sum(), signal, mode='valid')
    return smoothed
def get_digit_indices(n_cycles=20, vols=256, vols_per_digit=2.56):
    """
    Produce two boolean arrays, one for each run. Each array has shape
    (n_digits, n_volumes). Use these to select samples in our
    classification task
    """
    # TODO: These indices are only correct if I understand Esther's
    # stimulus timing correctly ...
    vols_per_digit_upsampled = int(vols_per_digit * 100)
    digits_run1 = []
    for didx in range(1, 6):
        # create series of 1s for the first finger stimulation
        finger_signal = signal.boxcar(vols_per_digit_upsampled)
        # add zeros before and after accordingly to form first cycle.
        post_padded = np.append(finger_signal,
                                [0] * vols_per_digit_upsampled * (5 - didx))
        first_cycle = np.insert(post_padded, obj=0,
                                values=[0] * vols_per_digit_upsampled * (didx - 1))
        all_cycles = np.tile(first_cycle, n_cycles)  # repeat to get all cycles
        # resample to volume space (i.e. take every 100th element)
        # and turn into boolean vector
        digit_bool = all_cycles[::100] > 0.01
        digits_run1.append(digit_bool)
    digits_run1 = np.array(digits_run1)
    digits_run2 = np.flip(digits_run1, axis=0)
    return digits_run1, digits_run2
def background_subtract(KT, objpos=None, radius=100, minl=350, maxl=1000,
                        n_std=9, n_iter=8, smoothing=400):
    # the original tested "objpos is None" here, which would pass None
    # to query_ball_point; the test must be inverted
    if objpos is not None:
        exclude = KT.KT.query_ball_point(objpos, radius)
    else:
        exclude = []

    lams = []
    specs = []
    for ix in range(len(KT.data)):
        e = KT.data[ix]
        if ix in exclude:
            continue
        if not e.ok:
            continue
        if e.lamrms > 1:
            continue
        if e.xrange[1] - e.xrange[0] < 200:
            continue
        if e.yrange[1] < 0:
            continue
        if e.yrange[0] < 0:
            continue
        if not np.isfinite(e.yrange[0]):
            continue
        if not np.isfinite(e.yrange[1]):
            continue
        try:
            l, s = e.get_flambda()
        except:
            continue
        lams.append(l)
        specs.append(s)
        exptime = e.exptime

    all_lams = np.array([lam for sublist in lams for lam in sublist])
    all_spec = np.array([spec for sublist in specs for spec in sublist])

    ix = np.argsort(all_lams)
    l, s = all_lams[ix], all_spec[ix]
    ok = (l > minl) & (l < maxl) & np.isfinite(l) & np.isfinite(s)
    knots = np.arange(minl, maxl, .1)
    boxcar = SG.boxcar(smoothing) / smoothing
    nok = len(s[ok])
    for i in range(n_iter):
        smoothed = SG.convolve(s[ok], boxcar, mode='same')
        ff = interp1d(l[ok], smoothed, kind='linear', bounds_error=False)
        res = (s - ff(l)) * exptime
        std = np.abs(res / np.sqrt(s * exptime))
        ok = (l > minl) & (l < maxl) & (std < n_std) & (np.isfinite(l))
        print(i, nok, len(s[ok]))
        if (float(nok) / len(s[ok]) - 1) < .001:
            break
        nok = len(s[ok])

    n_knots = len(s) // smoothing  # integer knot count (was float division)
    knots = np.arange(minl, maxl, float(maxl - minl) / n_knots)
    bgd = Background(lam_nm=knots, spec=ff(knots), exptime=exptime)
    return bgd
def smooth_signal(x, window_len, window, pol_order, implemented_smooth_method):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the
    signal. The signal is prepared by introducing reflected copies of the
    signal (with the window size) in both ends so that transient parts
    are minimized in the beginning and end part of the output signal.

    output: the smoothed signal

    see also:
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
    numpy.convolve, scipy.signal.savgol_filter
    """
    x = np.array(x)
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input array needs to be bigger than window size")
    # if window_len < 3:
    #     return x
    if window not in implemented_smooth_method:
        raise ValueError("Window method should be 'flat', 'hanning', "
                         "'hamming', 'bartlett', 'blackman'")

    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'savgol':
        return savgol_filter(x, window_len, pol_order)
    elif window == 'boxcar':  # moving average
        w = boxcar(window_len)
        # normalise so the boxcar is a true moving average, as the other
        # branches do
        y = np.convolve(w / w.sum(), s, mode='valid')
    elif window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
        y = np.convolve(w / w.sum(), s, mode='valid')
    else:
        # look the window up in numpy rather than eval()-ing its name
        w = getattr(np, window)(window_len)
        y = np.convolve(w / w.sum(), s, mode='valid')
    return y[int(window_len / 2 - 1):int(-window_len / 2)]
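A short usage sketch for smooth_signal(); the method list and random-walk input are illustrative only:

import numpy as np
from scipy.signal import boxcar, savgol_filter

methods = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman',
           'boxcar', 'savgol']
x = np.cumsum(np.random.randn(300))  # a noisy random walk
y = smooth_signal(x, window_len=21, window='hanning', pol_order=3,
                  implemented_smooth_method=methods)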
def test_extremes(self):
    # Test extremes of alpha correspond to boxcar and hann
    tuk0 = signal.tukey(100, 0)
    box0 = signal.boxcar(100)
    assert_array_almost_equal(tuk0, box0)

    tuk1 = signal.tukey(100, 1)
    han1 = signal.hann(100)
    assert_array_almost_equal(tuk1, han1)
def win_len_change(self):
    if str(self.ui.winTypeComboBox.currentText()) == 'rectangular':
        window = ss.boxcar(self.ui.windowLenSpin.value())
    elif str(self.ui.winTypeComboBox.currentText()) == 'tukey':
        window = ss.tukey(self.ui.windowLenSpin.value())
    elif str(self.ui.winTypeComboBox.currentText()) == 'hann':
        window = ss.hann(self.ui.windowLenSpin.value())
    data_conv = ss.convolve(self.raw_data, window, mode='same')
    self.plt.setData(self.data_x, data_conv, pen='g')
def xsmooth(flux, smooth=21):
    '''
    boxcar smooth the flux
    '''
    if smooth > 1:
        q = convolve(flux, boxcar(smooth) / float(smooth), mode='same')
        return q
    else:
        return flux
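A quick usage sketch for xsmooth(), assuming the bare convolve/boxcar names it references come from scipy.signal; the flux array is test data:

import numpy as np
from scipy.signal import boxcar, convolve

flux = np.random.rand(1000)
flux_sm = xsmooth(flux, smooth=21)  # 21-pixel running mean
assert flux_sm.shape == flux.shape  # mode='same' preserves the length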
def refineMask(mask, imageSeries, numDilations=3, thresh=0.5, se=None):
    def corrMaskWithSourcePreConv(imageSeriesSmoothed, dilatedBinaryMask,
                                  sourceSmoothed):
        corrImage = np.zeros((imageSeries.shape[0], imageSeries.shape[1]))
        bounds = np.squeeze(pymorph.blob(dilatedMask, 'boundingbox',
                                         output='data'))
        for x in range(bounds[1], bounds[3]):
            for y in range(bounds[0], bounds[2]):
                if dilatedBinaryMask[x, y] > 0:
                    corr = stats.pearsonr(sourceSmoothed[1:-1],
                                          imageSeriesSmoothed[x, y, :])[0]
                    corrImage[x, y] = corr
        return corrImage

    # calculate box for smoothing
    box = sig.boxcar(3)
    box = box / box.sum()
    imageSeriesSmoothed = nd.convolve1d(imageSeries, box, axis=2,
                                        mode='mirror')

    completeRefinedMask = np.zeros_like(mask)

    if se is None:
        se = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
        # se = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])

    seedMask = mask.copy() > 0
    for rep in range(numDilations):
        seedMask = pymorph.dilate(seedMask, se)

    for maskIndex in range(1, mask.max() + 1):
        origMask = mask == maskIndex
        dilatedOrigMask = origMask.copy() > 0
        for rep in range(numDilations):
            dilatedOrigMask = pymorph.dilate(dilatedOrigMask, se)

        forbiddenMask = np.logical_or(
            np.logical_and(seedMask, np.logical_not(dilatedOrigMask)),
            pymorph.dilate(completeRefinedMask))

        # make smoothed source
        source = avgFromROIInSeries(imageSeries, origMask)
        sourceSmoothed = np.convolve(source, box)

        dilatedMask = (mask == maskIndex).copy()
        for rep in range(numDilations + 1):
            dilatedMask = pymorph.dilate(dilatedMask)

        corrMask = corrMaskWithSourcePreConv(imageSeriesSmoothed,
                                             dilatedOrigMask,
                                             sourceSmoothed)
        threshMask = corrMask >= thresh
        newMask = np.logical_and(np.logical_not(forbiddenMask),
                                 np.logical_or(threshMask, origMask))
        # completeRefinedMask = np.logical_xor(completeRefinedMask, newMask)
        completeRefinedMask += (newMask > 0) * maskIndex
        # pdb.set_trace()
        completeRefinedMask[completeRefinedMask > maskIndex] = 0

    return completeRefinedMask
def smooth_waveform(in_waveform):
    smoothing_number = 10
    smoothed_flux = signal.convolve(in_waveform.flux,
                                    signal.boxcar(smoothing_number), 'same')
    cut_smoothed_flux = smoothed_flux[int(smoothing_number / 2):
                                      -int(smoothing_number / 2)]
    cut_wave = in_waveform.wave[int(smoothing_number / 2):
                                -int(smoothing_number / 2)]
    # Mask negative values and skip spectra if something goes wrong
    if len(smoothed_flux) == 0:
        print("The length of smoothed_flux was 0 so skipping\n")
        return in_waveform
    return waveform(cut_wave, cut_smoothed_flux, in_waveform.name)
def get_frame_from_file(file_path, sr=8000, duration=None, n_channels=1,
                        classes=256, win=signal.boxcar(160), inc=80,
                        is_cmvn=True):
    ## Read Audio
    if isinstance(file_path, np.ndarray):
        file_data = file_path
    else:
        filename, file_extension = os.path.splitext(file_path)
        if file_extension == '.mat':
            mat = hdf5storage.loadmat(file_path)
            file_data = np.array(mat['audio']).flatten()
            fs = np.asscalar(np.array(mat['fs']))
            file_data = signal.resample(file_data,
                                        int(file_data.shape[0] * (sr / fs)))
        elif duration is None:
            file_data, _ = lr.load(path=file_path, sr=sr, duration=duration,
                                   mono=n_channels == 1)
        else:
            file_data = read_audio(file_path, sampling_rate=sr,
                                   duration=duration, n_channels=n_channels)

    ## Normalize Audio for input to CNN
    # normalized_audio = normalize_audio(file_data, classes=classes)
    normalized_audio = file_data

    ## Enframe Normalized Audio
    frame = audio_to_frame(normalized_audio, win, inc)

    # frame = frame[:, ~np.all(frame == 0, axis=0)]
    frame = frame[:, ~(frame.sum(axis=0) == 0)]  ## Remove all zero-only speech units (columns)

    ## axis=1 ensures normalization across frames
    ## axis=0 ensures normalization within frames (as done for taslp work)
    if is_cmvn:
        frame = stats.zscore(frame, axis=0, ddof=1)
        # frame = cmvn(frame)
        frame = frame[:, ~np.any(np.isnan(frame), axis=0)]
        frame = frame[:, ~np.any(np.isinf(frame), axis=0)]

    ## Random crop transform
    # if frame.shape[1] > 200:
    #     idx = random.randint(0, frame.shape[1] - 200)
    #     frame = frame[:, idx:idx + 200]
    return frame
def _smooth(self, image):
    """Smoothing work horse."""
    t1 = time.time()
    data = image.get_data()

    if self.algorithm == 'gauss':
        s = 'sigma'
    else:
        s = 'size'
    debug_str = '{0}={1}, mode={2}'.format(s, self.smoothpars, self.mode)
    if self.mode == 'constant':
        debug_str += ', fillval={0}'.format(self.fillval)

    if self.algorithm == 'boxcar':
        kern = boxcar(self.smoothpars)
        kern /= kern.size
        new_dat = ndimage.convolve(data, kern, mode=self.mode,
                                   cval=self.fillval)
    elif self.algorithm == 'gauss':
        new_dat = ndimage.gaussian_filter(data, sigma=self.smoothpars,
                                          mode=self.mode, cval=self.fillval)
    else:  # medfilt
        new_dat = ndimage.median_filter(data, size=self.smoothpars,
                                        mode=self.mode, cval=self.fillval)

    # Insert new image
    old_name = image.get('name', 'none')
    new_name = self._get_new_name(old_name)
    new_im = self._make_image(new_dat, image, new_name)
    self.fv.gui_call(self.fv.add_image, new_name, new_im,
                     chname=self.chname)

    # This sets timestamp
    new_im.make_callback('modified')

    # Add change log
    s = 'Smoothed {0} using {1}, {2}'.format(old_name, self.algorithm,
                                             debug_str)
    iminfo = self.chinfo.get_image_info(new_name)
    iminfo.reason_modified = s
    self.logger.info(s)

    t2 = time.time()
    self.w.status.set_text('Done ({0:.3f} s)'.format(t2 - t1))
    self.toggle_gui(enable=True)
def smooth(x, window_size=10):
    """
    Smooth a series with a moving average (simple). Smoothing over the
    trailing values.

    :param x: (n,)
    :param window_size: int, size of the smoothing window
    :return: (n,)
    """
    window = signal.boxcar(window_size)  # using simple average
    # normalize the window so we don't change the scale of the convolved series
    window = window / np.sum(window)
    # convolved series will be smaller when using valid mode
    averaged = signal.convolve(x, window, mode='valid')
    # pad averaged on the left
    return np.pad(averaged, pad_width=(window_size - 1, 0), mode='edge')
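A quick usage sketch for smooth(); the random-walk input is test data:

import numpy as np
from scipy import signal

x = np.random.randn(100).cumsum()
xs = smooth(x, window_size=10)
assert xs.shape == x.shape  # the 'valid' output is re-padded on the left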
def generate_window_coefs(window, window_length, file_path, numPoints):
    print('\nGenerating {} window of length {}...'.format(window,
                                                          window_length))
    # specify total bits and fractional bits for fixed point input:
    n_bits = 16
    n_frac_bits = 15
    if window == 'boxcar':
        coefs = signal.boxcar(window_length)
    elif window == 'hamming':
        coefs = signal.hamming(window_length)
    elif window == 'hann':
        coefs = signal.hann(window_length)
    elif window == 'blackman':
        coefs = signal.blackman(window_length)
    else:
        coefs = signal.boxcar(window_length)

    # Write out hex file for VHDL
    intData = np.zeros(numPoints)
    nintData = np.uint16(coefs * (2**n_bits - 1))
    paddingFrac = 4
    if window_length == numPoints:
        intData = coefs
    else:
        for i in range(len(coefs)):
            intData[int(numPoints / paddingFrac) + i] = coefs[i]
    intData = [ID * (2**n_bits - 1) for ID in intData]
    intData = np.hstack([intData, intData])

    with open(str(file_path) + '/fpgaCoefData' + str(numPoints) + '_' +
              str(window) + '.txt', 'w') as FID:
        FID.write('\n'.join(['{}'.format(int(x)) for x in intData]))

    with open(str(file_path) + '/macCoefData' + str(numPoints) + '_' +
              str(window) + '.txt', 'w') as FID:
        FID.write('signal myCoef : input_array32 :=(')
        FID.write(','.join(['x"{0:08X}"'.format(int(x))
                            for x in intData]) + ');')
def smooth_boxcar(self, width):
    """
    Uses boxcar smoothing to smooth the internal arrays.

    Parameters
    ----------
    width : integer
        The boxcar width in pixels.

    Reference:
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.boxcar.html
    """
    # I don't know if this is really the proper way to do this...
    # should be tested!! (note: convolve defaults to mode='full', which
    # lengthens the array)
    from scipy.signal import convolve, boxcar
    self.flux = convolve(self.flux, boxcar(M=width))
def test_tukey():
    # Test against hardcoded data
    for k, v in tukey_data.items():
        if v is None:
            assert_raises(ValueError, signal.tukey, *k)
        else:
            win = signal.tukey(*k)
            assert_allclose(win, v, rtol=1e-14)

    # Test extremes of alpha correspond to boxcar and hann
    tuk0 = signal.tukey(100, 0)
    tuk1 = signal.tukey(100, 1)
    box0 = signal.boxcar(100)
    han1 = signal.hann(100)
    assert_array_almost_equal(tuk0, box0)
    assert_array_almost_equal(tuk1, han1)
def generate_csp_features(csp, raw, picks, nwin, nfilters):
    """
    Generate csp features and then smooth the features by convolution
    with a rectangle window.

    :param csp: The trained csp filter
    :param raw: The raw data
    :return: The filtered features
    """
    # apply csp filters and rectify signal
    feat = np.dot(csp.filters_[0:nfilters], raw._data[picks]) ** 2
    # smoothing by convolution with a rectangle window
    feattr = np.array([convolve(feat[i], boxcar(nwin), 'full')
                       for i in range(nfilters)])
    feattr = np.log(feattr[:, 0:feat.shape[1]])
    return feattr
def computePSTH(spike_file1, spike_file2, times, window_before=1,
                window_after=2, binsize=1):
    '''
    Input:
        - spike_file1: sorted spikes for Channels 1 - 96
        - spike_file2: sorted spikes for Channels 97 - 160
        - times: time points to align peri-stimulus time histograms to
        - window_before: amount of time before alignment points to include
          in time window, units in seconds
        - window_after: amount of time after alignment points to include
          in time window, units in seconds
        - binsize: time length of bins for estimating spike rates, units
          in milliseconds
    Output:
        - psth: peri-stimulus time histogram over window [window_before,
          window_after] averaged over trials
    '''
    boxcar_length = 4.
    channels = np.arange(1, 161)
    binsize = float(binsize) / 1000
    psth_time_window = np.arange(0, window_before + window_after -
                                 float(binsize), float(binsize))
    # 2 ms before, 2 ms after for boxcar smoothing; the length must be an int
    boxcar_window = signal.boxcar(int(boxcar_length))
    psth = dict()
    smooth_psth = dict()
    unit_labels = []
    for channel in channels:
        if channel < 97:
            channel_spikes = [entry for entry in spike_file1
                              if (entry[1] == channel)]
        else:
            channel2 = channel % 96
            channel_spikes = [entry for entry in spike_file2
                              if (entry[1] == channel2)]
        units = [spike[2] for spike in channel_spikes]
        unit_vals = set(units)  # number of units
        if len(unit_vals) > 0:
            unit_vals.remove(0)  # value 0 are units marked as noise events
        for unit in unit_vals:
            unit_name = 'Ch' + str(channel) + '_' + str(unit)
            spike_times = [spike[0] for spike in channel_spikes
                           if (spike[2] == unit)]
            psth[unit_name] = np.zeros(len(psth_time_window))
            unit_labels.append(unit_name)
            for time in times:
                epoch_bins = np.arange(time - window_before,
                                       time + window_after, float(binsize))
                counts, bins = np.histogram(spike_times, epoch_bins)
                # collect all rates into a N-dim array
                psth[unit_name] += counts[0:len(psth_time_window)] / binsize
            psth[unit_name] = psth[unit_name] / float(len(times))
            smooth_psth[unit_name] = np.convolve(psth[unit_name],
                                                 boxcar_window,
                                                 mode='same') / boxcar_length
    return psth, smooth_psth, unit_labels
def gFitTrace(self, specimage, y1, y2):
    """ Fit a gaussian to each column of an image. """
    sizex, sizey = specimage.shape
    smoytrace = np.zeros(sizey).astype(np.float)
    boxcar_kernel = signal.boxcar(3) / 3.0
    for c in np.arange(sizey):
        col = specimage[:, c]
        col = col - np.median(col)
        smcol = ni.convolve(col, boxcar_kernel).astype(np.float)
        fit = gfit.gfit1d(smcol, quiet=1, maxiter=15)
        smoytrace[c] = fit.params[1]
    return np.array(smoytrace)
def computePSTH_SingleChannel(spike_file, channel, times, window_before=1,
                              window_after=2, binsize=1):
    '''
    Input:
        - spike_file: sorted spikes for Channel N; spike_file should be
          the result of plx = plexfile.openFile('filename.plx') and
          spike_file = plx.spikes[:].data
        - times: time points to align peri-stimulus time histograms to
        - window_before: amount of time before alignment points to include
          in time window, units in seconds
        - window_after: amount of time after alignment points to include
          in time window, units in seconds
        - binsize: time length of bins for estimating spike rates, units
          in milliseconds
    Output:
        - psth: peri-stimulus time histogram over window [window_before,
          window_after] averaged over trials
        - smooth_psth: psth smoothed using boxcar filter
        - unit_labels: names of units on channel
    '''
    boxcar_length = 4.
    channel = channel
    binsize = float(binsize) / 1000
    psth_time_window = np.arange(0, window_before + window_after -
                                 float(binsize), float(binsize))
    # 2 ms before, 2 ms after for boxcar smoothing; the length must be an int
    boxcar_window = signal.boxcar(int(boxcar_length))
    psth = dict()
    smooth_psth = dict()
    unit_labels = []
    units = [spike[2] for spike in spike_file]
    unit_vals = set(units)  # number of units
    if len(unit_vals) > 0:
        unit_vals.remove(0)  # value 0 are units marked as noise events
    for unit in unit_vals:
        unit_name = 'Ch' + str(channel) + '_' + str(unit)
        spike_times = [spike[0] for spike in spike_file
                       if (spike[2] == unit)]
        psth[unit_name] = np.zeros(len(psth_time_window))
        unit_labels.append(unit_name)
        for time in times:
            epoch_bins = np.arange(time - window_before,
                                   time + window_after, float(binsize))
            counts, bins = np.histogram(spike_times, epoch_bins)
            # collect all rates into a N-dim array
            psth[unit_name] += counts[0:len(psth_time_window)] / binsize
        psth[unit_name] = psth[unit_name] / float(len(times))
        smooth_psth[unit_name] = np.convolve(psth[unit_name], boxcar_window,
                                             mode='same') / boxcar_length
    return psth, smooth_psth, unit_labels
def automatic_gain_control(self):
    data = self.model.getData()
    data = data - np.amin(data)
    (ntime, ntrace) = data.shape

    # initialize a filter window
    wlength = 21
    window = signal.boxcar(wlength)
    gain_data = np.empty([ntime, ntrace])

    # first filter, then divide
    for i in range(0, ntrace):
        # gain_data[:, i] = signal.convolve(data[:, i], window, 'same')
        gain_data[:, i] = signal.fftconvolve(data[:, i], window, 'same')

    # add a small number before dividing (in case of zeros);
    # epsi is a small module-level constant
    gain_data = gain_data + epsi
    gain_data = data / gain_data
    # tmp = '/home/amin/Dropbox/research-code/seismic/code/gain_data.txt'
    # np.savetxt(tmp, gain_data, delimiter='\t')
    self.model.setAGCData(gain_data)
def construct_taper(npts, taper_type="tukey", alpha=0.2):
    """
    Construct taper based on npts

    :param npts: the number of points
    :param taper_type: type of taper window ('hann', 'boxcar' or 'tukey')
    :param alpha: taper width
    :return: the taper array
    """
    taper_type = taper_type.lower()
    _options = ['hann', 'boxcar', 'tukey']
    if taper_type not in _options:
        raise ValueError("taper type option: %s" % taper_type)
    if taper_type == "hann":
        taper = signal.hann(npts)
    elif taper_type == "boxcar":
        taper = signal.boxcar(npts)
    elif taper_type == "tukey":
        taper = signal.tukey(npts, alpha=alpha)
    else:
        raise ValueError("Taper type not supported: %s" % taper_type)
    return taper
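A quick usage sketch for construct_taper(); the data and length are made up:

import numpy as np
from scipy import signal

npts = 101
taper = construct_taper(npts, taper_type='tukey', alpha=0.2)
data = np.random.randn(npts)
tapered = data * taper  # taper applied sample-by-sample before an FFT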
def test_basic(self):
    assert_allclose(signal.boxcar(6), [1, 1, 1, 1, 1, 1])
    assert_allclose(signal.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
    assert_allclose(signal.boxcar(6, False), [1, 1, 1, 1, 1, 1])
def findPeak(e_j, box):
    """Find the location of the maximum within the subset.

    Note that the data were collapsed to the left edge to get e_j, so the
    location is the intercept on the edge, rather than where the spectrum
    crosses the middle of the detector or where it crosses X = x_offset.
    Also, e_j is not the full height of the detector, just a subset
    centered on the nominal Y location of the spectrum.

    Parameters
    ----------
    e_j: array_like
        1-D array of data collapsed along dispersion axis, taking into
        account the tilt of the spectrum

    box: int
        Smooth e_j with a box of this width before looking for the maximum

    Returns
    -------
    tuple
        The location (float) in the cross-dispersion direction relative to
        the first pixel in e_j, an estimate of the uncertainty in that
        location, and the FWHM of the peak in the cross-dispersion profile
    """
    boxcar_kernel = signal.boxcar(box) / box
    e_j_sm = ndimage.convolve(e_j, boxcar_kernel, mode="nearest")

    index = np.argsort(e_j_sm)
    ymax = index[-1]

    nelem = len(e_j)

    # This may be done again later, after we have found the location more
    # accurately.
    fwhm = findFwhm(e_j, ymax)

    # fit a quadratic to at least five points centered on ymax
    MIN_NPTS = 5
    npts = int(round(fwhm))
    npts = max(npts, MIN_NPTS)
    if npts // 2 * 2 == npts:
        npts += 1

    x = np.arange(nelem, dtype=np.float64)
    j1 = ymax - npts // 2
    j1 = max(j1, 0)
    j2 = j1 + npts
    if j2 > nelem:
        j2 = nelem
        j1 = j2 - npts
        j1 = max(j1, 0)

    (coeff, var) = cosutil.fitQuadratic(x[j1:j2], e_j_sm[j1:j2])
    (y_locn, y_locn_sigma) = cosutil.centerOfQuadratic(coeff, var)
    if y_locn is None:
        y_locn = ymax
        y_locn_sigma = 999.

    # Find the FWHM again if the location is far from the brightest pixel.
    if abs(y_locn - ymax) > fwhm / 4.:
        fwhm = findFwhm(e_j, y_locn)

    return (y_locn, y_locn_sigma, fwhm)
def filter_csd(self, csd, filterfunction='convolve'):
    '''
    Spatial filtering of the CSD estimate, using an N-point filter

    Arguments
    ---------
    csd : np.ndarray * quantity.Quantity
        Array with the csd estimate
    filterfunction : str
        'filtfilt' or 'convolve'. Apply spatial filter using
        scipy.signal.filtfilt or scipy.signal.convolve.
    '''
    # the original re-raised the caught AssertionError by calling it,
    # which is a TypeError; raise directly instead
    if self.f_type == 'gaussian':
        if len(self.f_order) != 2:
            raise AssertionError('filter order f_order must be a tuple '
                                 'of length 2')
    else:
        if not (self.f_order > 0 and isinstance(self.f_order, int)):
            raise AssertionError('Filter order must be int > 0!')
    if filterfunction not in ['filtfilt', 'convolve']:
        raise AssertionError("{} not equal to 'filtfilt' or "
                             "'convolve'".format(filterfunction))

    if self.f_type == 'boxcar':
        num = ss.boxcar(self.f_order)
        denom = np.array([num.sum()])
    elif self.f_type == 'hamming':
        num = ss.hamming(self.f_order)
        denom = np.array([num.sum()])
    elif self.f_type == 'triangular':
        num = ss.triang(self.f_order)
        denom = np.array([num.sum()])
    elif self.f_type == 'gaussian':
        num = ss.gaussian(self.f_order[0], self.f_order[1])
        denom = np.array([num.sum()])
    elif self.f_type == 'identity':
        num = np.array([1.])
        denom = np.array([1.])
    else:
        raise Exception('%s Wrong filter type!' % self.f_type)

    num_string = '[ '
    for i in num:
        num_string = num_string + '%.3f ' % i
    num_string = num_string + ']'
    denom_string = '[ '
    for i in denom:
        denom_string = denom_string + '%.3f ' % i
    denom_string = denom_string + ']'

    print(('discrete filter coefficients: \nb = {}, '
           '\na = {}'.format(num_string, denom_string)))

    if filterfunction == 'filtfilt':
        return ss.filtfilt(num, denom, csd, axis=0) * csd.units
    elif filterfunction == 'convolve':
        csdf = csd / csd.units
        for i in range(csdf.shape[1]):
            csdf[:, i] = ss.convolve(csdf[:, i], num / denom.sum(), 'same')
        return csdf * csd.units
numtaps = 185
taps_none = firwin2(numtaps, freqs, gains, fs=fs, window=None)
taps_h = firwin2(numtaps, freqs, gains, fs=fs)
beta = 2.70
taps_k = firwin2(numtaps, freqs, gains, fs=fs, window=('kaiser', beta))

w_none, h_none = freqz(taps_none, 1, worN=2000)
w_h, h_h = freqz(taps_h, 1, worN=2000)
w_k, h_k = freqz(taps_k, 1, worN=2000)

plt.figure(figsize=(4.0, 2.8))

win_boxcar = boxcar(numtaps)
win_hamming = hamming(numtaps)
win_kaiser = kaiser(numtaps, beta)

plt.plot(win_hamming, label='Hamming')
plt.plot(win_kaiser, label='Kaiser, $\\beta$=%.2f' % beta)
plt.plot(win_boxcar, label='rectangular')
plt.xticks([0, (numtaps - 1) // 2, numtaps - 1])
plt.xlabel('Sample number')
plt.ylim(0, 1.05)
plt.grid(alpha=0.25)
plt.title("Window functions", fontsize=10)
plt.legend(framealpha=1, shadow=True)
plt.tight_layout()
plt.savefig("firwin2_examples_windows.pdf")
import nitime.algorithms as tsa
import nitime.utils as utils
from nitime.viz import winspect
from nitime.viz import plot_spectral_estimate

"""
For demonstration, we will use a window of 128 points:
"""

npts = 128

fig01 = plt.figure()

# Boxcar with zeroed out fraction
b = sig.boxcar(npts)
zfrac = 0.15
zi = int(npts * zfrac)
b[:zi] = b[-zi:] = 0
name = 'Boxcar - zero fraction=%.2f' % zfrac
winspect(b, fig01, name)

"""
.. image:: fig/multi_taper_spectral_estimation_01.png

The figure on the left shows a boxcar window and the figure on the right
shows the spectrum of the boxcar function (in dB units, relative to the
frequency band of interest).

These two problems can together be mitigated through the use of other
"""
def makemask(self):
    """ Make and output mask """
    thresh = self.thresh
    medfile = self.medfile
    callist = self.callist
    verbosity = self.verbosity

    blot1 = np.zeros((ASIZE, ASIZE), dtype=np.float64)

    # open callist and read file names
    cfile = open(callist, 'r')
    calfiles = []
    num_files = 0
    while 1:  # first count number of files in list and generate file list
        line = cfile.readline()
        if not line:
            break
        num_files += 1
        calfiles.append(line)
    cfile.close()

    bltfiles = []  # list of blot files
    if verbosity >= 1:
        print('There are', num_files, 'cal files. They are : ')
    for ii in range(num_files):
        # strip leading and trailing whitespace (the original discarded
        # the result of lstrip/rstrip)
        calfiles[ii] = calfiles[ii].lstrip().rstrip()
        calfile_prefix = calfiles[ii].split('_')[0]
        if verbosity >= 1:
            print(' calfiles[', ii, '] = ', calfiles[ii])
        # associate blt files with cal files
        bltfile = calfile_prefix + str("_cal_sci1_blt.fits")
        bltfile = bltfile.lstrip().rstrip()
        bltfiles.append(bltfile)
        bltfiles[ii] = bltfile

    im_cube = np.zeros((ASIZE, ASIZE, num_files), dtype=np.float64)
    blot_cube = np.zeros((ASIZE, ASIZE, num_files), dtype=np.float64)

    for kk in range(num_files):
        fh_cal = pyfits.open(calfiles[kk])
        fh_blot = pyfits.open(bltfiles[kk])
        im_cube[:, :, kk] = fh_cal[1].data
        blot_cube[:, :, kk] = fh_blot[0].data

    # make mask from blotted images
    mask_cube = np.zeros((ASIZE, ASIZE, num_files), dtype=np.float64)
    # relies on boxcar(M) returning np.ones(M), so a 2-tuple gives a
    # 3x3 kernel
    boxcar_kernel = signal.boxcar((3, 3)) / 9

    for ii in range(num_files):
        mm = np.zeros((ASIZE, ASIZE), dtype=np.float64)
        dif_0 = blot_cube[:, :, ii]
        dif = np.reshape(dif_0, ((ASIZE, ASIZE)))
        ur = dif > thresh
        mm[ur] = 1
        # expand the mask.
        # smooth over 3x3; this will differ from IDL's "smooth" which
        # leaves boundary values unchanged, which is not an option in
        # convolve's boxcar
        mm = ndimage.convolve(mm, boxcar_kernel)
        ur = mm != 0.0
        mm = np.zeros((ASIZE, ASIZE), dtype=np.float64)
        mm[ur] = 1
        mask_cube[:, :, ii] = mm

    ## make the masked median image
    if verbosity >= 1:
        print(' Making the masked median image ... ')
    maskall = np.zeros((ASIZE, ASIZE), dtype=np.float64)

    for jj in range(ASIZE):
        for kk in range(ASIZE):
            uu = mask_cube[kk, jj, :] != 1
            im_sub = im_cube[kk, jj, uu]
            im_sub_size = im_sub.size
            im_1d = np.reshape(im_sub, im_sub.size)
            if im_sub_size > 0:
                maskall[kk, jj] = np.median(im_1d)

    # get primary header of 1st cal file to copy to output
    fh_cal0 = pyfits.open(calfiles[0])
    pr_hdr = fh_cal0[0].header
    write_to_file(maskall, medfile, pr_hdr, verbosity)

    if verbosity >= 1:
        print('DONE')
def ttFindSpec(xdisp, xtract_info, life_adj_offset, xd_range, box):
    """Find the location in the cross-dispersion direction.

    Parameters
    ----------
    xdisp: array_like
        The cross-dispersion profile, 1-D array of time-tag data collapsed
        along the dispersion axis, but taking into account the tilt of the
        spectrum.

    xtract_info: array_like
        Data block (but just one row) from the xtractab.

    life_adj_offset: float
        Normally this will be 0. If the LIFE_ADJ keyword is -1, however,
        indicating that the aperture block is not at one of the recognized
        "lifetime positions," life_adj_offset will be the expected offset
        (in pixels) of the wavecal spectrum from lifetime position 1.

    xd_range: int
        Search within + or - xd_range from the nominal location for the
        peak in xdisp.

    box: int
        Smooth xdisp with a box of this width before looking for the
        maximum.

    Returns
    -------
    (shift2, y): tuple of two floats
        shift2 is the shift from nominal in the cross-dispersion direction
        (or None), and y is the location of the spectrum. The location is
        based on fitting a quadratic to points near the maximum. Note that
        the data were collapsed to the left edge to get xdisp, so the
        location is the intercept on the edge, rather than where the
        spectrum crosses the middle of the detector.
    """
    y_nominal = xtract_info.field("b_spec")[0] + life_adj_offset
    segment = xtract_info.field("segment")[0]  # for possible warning message

    # The values of y_nominal and xd_range should be such that neither
    # y0 nor y1 will be less than zero or greater than 1023.
    y0 = int(round(y_nominal - xd_range))
    y1 = int(round(y_nominal + xd_range)) + 1
    if y0 < 0 or y1 >= len(xdisp):
        cosutil.printWarning("XD_RANGE in WCPTAB is too large.")
        y0 = max(y0, 0)
        y1 = min(y1, len(xdisp) - 1)

    boxcar_kernel = scipysignal.boxcar(box) / box
    xdisp_sm = ndimage.convolve(xdisp, boxcar_kernel, mode="nearest")
    len_xdisp_sm = len(xdisp_sm)

    if y0 >= y1:
        return (None, 0.)

    index = np.argsort(xdisp_sm[y0:y1])
    y = y0 + index[-1]
    signal = xdisp_sm[y]  # value in smoothed array

    # Check for duplicate values.
    y_min = y
    y_max = y
    while y_min > 0 and xdisp_sm[y_min] == signal:
        y_min -= 1
    while y_max < len_xdisp_sm and xdisp_sm[y_max] == signal:
        y_max += 1

    y_float = float(y_min + y_max) / 2.
    y = int(round(y_float))

    # Fit a quadratic to the smoothed curve near the peak.
    fit_range = (y_max - y_min) + box
    if fit_range < xd_range:
        r0 = y - fit_range // 2
        r1 = r0 + fit_range
        r0 = max(r0, 0)
        r1 = min(r1, len_xdisp_sm)
        r0 = r1 - fit_range
        x = np.arange(fit_range, dtype=np.float64)
        (coeff, var) = cosutil.fitQuadratic(x, xdisp_sm[r0:r1])
        (y_temp, y_float_sigma) = cosutil.centerOfQuadratic(coeff, var)
        if y_temp is None:
            return (None, 0.)
        y_float = y_temp + r0

    # Find the background level.
    i = index[(y1 - y0) // 2]
    background = xdisp_sm[y0 + i]  # median of smoothed array

    sigma_s = math.sqrt(signal * box)
    sigma_b = math.sqrt(background * box)
    sigma_s_b = math.sqrt(sigma_s**2 + sigma_b**2)
    if sigma_s_b > 0.:
        signal_to_noise = (signal - background) * box / sigma_s_b
    else:
        signal_to_noise = 0.

    if signal_to_noise >= 5.:
        shift2 = y_float - y_nominal + life_adj_offset
    else:
        shift2 = None

    return (shift2, y_float)
# get data
X = epochs.get_data()
y = np.array(y)

# train CSP
csp = CSP(n_components=nfilters, reg='lws')
csp.fit(X, y)

################ Create Training Features #################################
# apply csp filters and rectify signal
feat = np.dot(csp.filters_[0:nfilters], raw._data[picks])**2

# smoothing by convolution with a rectangle window
feattr = np.empty(feat.shape)
for i in range(nfilters):
    feattr[i] = np.log(convolve(feat[i], boxcar(nwin),
                                'full'))[0:feat.shape[1]]
feattr_tot.append(feattr)

# training labels
# they are stored in the 6 last channels of the MNE raw object
labels = raw._data[32:]

# vertically stack filter bank features together
feattr = np.concatenate(feattr_tot)

################ Create test Features #####################################
# read test data
fnames = glob('test/subj%d_series*_data.csv' % (subject))
raw = concatenate_raws([creat_mne_raw_object(fname, read_events=False)
                        for fname in fnames])
epochs = concatenate_epochs(epochs_tot)

# get data
X = epochs.get_data()
y = np.array(y)

# train CSP
csp = CSP(n_components=nfilters, reg='lws')
csp.fit(X, y)

################ Create Training Features #################################
# apply csp filters and rectify signal
feat = np.dot(csp.filters_[0:nfilters], raw._data[picks])**2

# smoothing by convolution with a rectangle window
feattr = np.array(Parallel(n_jobs=-1)(
    delayed(convolve)(feat[i], boxcar(nwin), 'full')
    for i in range(nfilters)))
feattr = np.log(feattr[:, 0:feat.shape[1]])

# training labels
# they are stored in the 6 last channels of the MNE raw object
labels = raw._data[32:]

################ Create test Features #####################################
# read test data
fnames = glob("../30 Data/test/subj%d_series*_data.csv" % (subject))
raw = concatenate_raws([creat_mne_raw_object(fname, read_events=False)
                        for fname in fnames])
raw._data[picks] = np.array(Parallel(n_jobs=-1)(
    delayed(lfilter)(b, a, raw._data[i]) for i in picks))

# read ids
ids = np.concatenate([np.array(pd.read_csv(fname)['id'])
                      for fname in fnames])
ids_tot.append(ids)