def test_perform(self):
    x = lscalar()
    f = function([x], self.op(x))
    M = np.random.randint(3, 51, size=())
    assert np.allclose(f(M), np.bartlett(M))
    assert np.allclose(f(0), np.bartlett(0))
    assert np.allclose(f(-1), np.bartlett(-1))
    b = np.array([17], dtype="uint8")
    assert np.allclose(f(b[0]), np.bartlett(b[0]))
def test_perform(self):
    x = tensor.lscalar()
    f = function([x], self.op(x))
    M = numpy.random.random_integers(3, 50, size=())
    assert numpy.allclose(f(M), numpy.bartlett(M))
    assert numpy.allclose(f(0), numpy.bartlett(0))
    assert numpy.allclose(f(-1), numpy.bartlett(-1))
    b = numpy.array([17], dtype='uint8')
    assert numpy.allclose(f(b[0]), numpy.bartlett(b[0]))
def test_bartlett_1(self):
    b = np.bartlett(5)
    print(b)
    b = np.bartlett(10)
    print(b)
    b = np.bartlett(12)
    print(b)
    return
def acf(f, lag=2000, window=2, conv=True):
    """My own interface to scipy"""
    from scipy import signal
    import numpy as np

    # This is needed due to how scipy computes the ACF (it rolls)
    f2 = np.concatenate((f, np.zeros((f.shape[0], f.shape[1]))))

    if not conv:
        acf = signal.correlate(f2, f, mode="valid", method='direct')[:-1]  # usually we need only the sum
    else:
        acf = signal.correlate(f2, f, mode="valid")[:-1]  # usually we need only the sum

    # Normalize
    acf = np.divide(acf.flatten(), np.flip(np.arange(1, acf.size + 1), axis=0))

    # Select window
    if window == 1:
        win = np.blackman(2 * lag)
    elif window == 2:
        win = np.hanning(2 * lag)
    elif window == 3:
        win = np.hamming(2 * lag)
    elif window == 4:
        win = np.bartlett(2 * lag)
    else:
        win = np.ones(2 * lag)

    # Apply window
    acf = np.multiply(acf[:lag], win[lag:])
    return acf
def main_true():
    # load true ecg data
    num = 1
    for i in range(1, 15):
        val = "{}-1".format(i)
        try:
            data = np.load("{}resampled-{}.npy".format(dirs, val))
        except IOError:
            print("{} doesn't exist.".format(val))
            continue
        else:
            print("Start processing {}".format(val))
        for k in range(len(data)):
            y = data[k]
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            # spectrogram parameters kept the same as in Matlab (including the window, if using hamming)
            plt.axis("off")
            plt.specgram(y, NFFT=nfft, Fs=fs, noverlap=int(fs * (475 / 512)), window=np.bartlett(fs))
            extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
            # save fig without the white border
            plt.savefig("./dataset/Specgrams/1/{}.png".format(num), bbox_inches=extent)
            plt.close()
            num += 1
        print("{} finished. pic num is {}".format(val, num - 1))
def smooth(x, windowLen, windowType="boxcar"):
    """Smooth a numpy array, returning the smoothed values

    Adapted from http://www.scipy.org/Cookbook/SignalSmooth"""
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < windowLen:
        raise ValueError("Input vector needs to be bigger than window size.")
    if windowLen < 3:
        return x
    if windowType == "boxcar":
        w = np.ones(windowLen, 'd')
    elif windowType == "hamming":
        w = np.hamming(windowLen)
    elif windowType == "hanning":
        w = np.hanning(windowLen)
    elif windowType == "bartlett":
        w = np.bartlett(windowLen)
    elif windowType == "blackman":
        w = np.blackman(windowLen)
    else:
        raise ValueError("windowType %s is not one of 'boxcar', 'hanning', 'hamming', 'bartlett', 'blackman'" % windowType)
    s = np.r_[x[windowLen - 1:0:-1], x, x[-1:-windowLen:-1]]
    y = np.convolve(w / w.sum(), s, mode='valid')
    return y[windowLen // 2:-windowLen // 2 + 1]
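# A minimal usage sketch for the smooth() above (hypothetical data; assumes numpy is
# imported as np, as in the function body): smooth a noisy sine with a 21-point
# Bartlett window. The reflected padding plus the final slice keep the output the
# same length as the input.
t = np.linspace(0, 4 * np.pi, 400)
noisy = np.sin(t) + 0.2 * np.random.randn(t.size)
smoothed = smooth(noisy, windowLen=21, windowType="bartlett")
print(noisy.shape, smoothed.shape)  # both (400,)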
def apply_filter(self, array):
    if len(array.shape) != 1:
        return False  # only 1D data is supported
    nx = array.shape[0]
    # Apodization function
    if self.type == 1:
        ft = np.bartlett(nx)
    elif self.type == 2:
        ft = np.blackman(nx)
    elif self.type == 3:
        ft = np.hamming(nx)
    elif self.type == 4:
        ft = np.hanning(nx)
    elif self.type == 5:
        # Blackman-Harris window
        a0 = 0.35875
        a1 = 0.48829
        a2 = 0.14128
        a3 = 0.01168
        x = np.linspace(0, 1, nx)
        ft = a0 - a1 * np.cos(2 * np.pi * x) + a2 * np.cos(4 * np.pi * x) + a3 * np.cos(6 * np.pi * x)
    elif self.type == 6:
        # Lanczos window
        x = np.linspace(-1, 1, nx)
        ft = np.sinc(x)
    elif self.type == 7:
        # Custom window: evaluate the user expression in an explicit namespace,
        # since exec() cannot rebind a function-local variable in Python 3.
        ns = {'np': np, 'x': np.linspace(-1, 1, nx)}
        exec('x=' + self.custom, ns)
        ft = ns['x']
    else:
        ft = np.ones(nx)
    array.data = array.data * ft
    return True
def smooth(x, window_len=11, window='hanning'):
    """Smooth a 1D signal by convolving a reflected copy of it with the chosen window."""
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    s = np.r_[2 * x[0] - x[window_len - 1::-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len)
    elif window == 'hanning':
        w = np.hanning(window_len)
    elif window == 'hamming':
        w = np.hamming(window_len)
    elif window == 'bartlett':
        w = np.bartlett(window_len)
    elif window == 'blackman':
        w = np.blackman(window_len)
    else:
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
        )
    y = np.convolve(w / w.sum(), s, mode='same')
    return y[window_len:-window_len + 1]
def power_spectrum(trace, time_window=1000, overlap_window=None, enforce_pow2=True, freq_range=None):
    # -----------------------------------------------------------------------------
    '''
    Returns the power spectrum of trace. Parameters set to fit with what Paolo
    does within his project.
    '''
    from matplotlib.mlab import psd
    from matplotlib.pylab import detrend_linear, detrend_mean, detrend_none
    from neurovivo.common import nextpow2

    dt = (trace._time[1] - trace._time[0]).rescale(pq.ms).item()
    Fs = 1000. / dt
    NFFT = int(time_window / dt)
    if enforce_pow2:
        NFFT = nextpow2(NFFT)
        time_window = 1. * time_window * NFFT / int(time_window / dt)
    if overlap_window is None:
        overlap_window = 0.75 * time_window
    noverlap = int(overlap_window / dt)
    window = np.bartlett(NFFT)
    # print(dt, Fs, NFFT, noverlap)
    pows, freqs = psd(detrend_mean(trace._data), NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                      window=window, detrend=detrend_none)
    if freq_range is None:
        return pows, freqs
    else:
        freqs, pows = cmn.splice_two_vectors(freqs, pows, freq_range)
        return pows, freqs
def plotPSD(data, fftWindow, Fs):
    assert fftWindow in ['rectangular', 'bartlett', 'blackman', 'hamming', 'hanning']
    N = len(data)

    # Generate the selected window
    if fftWindow == "rectangular":
        window = np.ones(N)
    elif fftWindow == "bartlett":
        window = np.bartlett(N)
    elif fftWindow == "blackman":
        window = np.blackman(N)
    elif fftWindow == "hamming":
        window = np.hamming(N)
    elif fftWindow == "hanning":
        window = np.hanning(N)

    dft = np.fft.fft(data * window)

    if Fs is None:
        # If the sample rate is not known, plot the PSD as
        # Power/Freq in (dB/rad/sample)
        plt.psd(data * window, NFFT=N)
        print("first")
    else:
        # If the sample rate is known, plot the PSD as
        # Power/Freq in (dB/Hz)
        plt.psd(data * window, NFFT=N, Fs=Fs)
        print("second")

    plt.show()
def bartlett_window(self, show=0):
    apod = N.bartlett(self.data_points)
    for i in range(2):
        self.the_result.y[i] = self.the_result.y[i] * apod
    if show == 1:
        return self.the_result
    return self
def gen_mel_filts(num_filts, framelength, samp_freq):
    mel_filts = numpy.zeros((framelength, num_filts))
    step_size = int(framelength / float(num_filts + 1))  # sketch it out to understand
    filt_width = int(math.floor(step_size * 2))
    filt = numpy.bartlett(filt_width)
    step = 0
    for i in range(num_filts):
        mel_filts[step:step + filt_width, i] = filt
        step = step + step_size

    # Let's find the linear filters that correspond to the mel filters
    # The freq axis goes from 0 to samp_freq/2, so...
    samp_freq = samp_freq / 2

    filts = numpy.zeros((framelength, num_filts))
    for i in range(num_filts):
        for j in range(framelength):
            freq = (j / float(framelength)) * samp_freq
            # See which freq pt corresponds on the mel axis
            mel_freq = 1127 * numpy.log(1 + freq / 700)
            mel_samp_freq = 1127 * numpy.log(1 + samp_freq / 700)
            # where does that index in the discrete frequency axis
            mel_freq_index = int((mel_freq / mel_samp_freq) * framelength)
            if mel_freq_index >= framelength - 1:
                mel_freq_index = framelength - 1
            filts[j, i] = mel_filts[mel_freq_index, i]

    # Let's normalize each filter based on its width
    for i in range(num_filts):
        nonzero_els = numpy.nonzero(filts[:, i])
        width = len(nonzero_els[0])
        filts[:, i] = filts[:, i] * (10.0 / width)

    return filts
def construct_window(width, family, scale): if family == "bartlett": return numpy.bartlett(width) * scale if family == "blackman": return numpy.blackman(width) * scale if family == "hamming": return numpy.hamming(width) * scale if family == "hann": import scipy.signal return scipy.signal.hann(width) * scale if family == "hanning": return numpy.hanning(width) * scale if family == "kaiser": beta = 14 return numpy.kaiser(width, beta) * scale if family == "tukey": import scipy.signal return scipy.signal.tukey(width) * scale print("window family %s not supported" % family)
def single_taper_spectrum(data, delta, taper_name=None):
    """
    Returns the spectrum and the corresponding frequencies for data with the
    given taper.
    """
    length = len(data)
    good_length = length // 2 + 1
    # Create the frequencies.
    # XXX: This might be some kind of hack
    freq = abs(np.fft.fftfreq(length, delta)[:good_length])
    # Create the tapers.
    if taper_name == 'bartlett':
        taper = np.bartlett(length)
    elif taper_name == 'blackman':
        taper = np.blackman(length)
    elif taper_name == 'boxcar':
        taper = np.ones(length)
    elif taper_name == 'hamming':
        taper = np.hamming(length)
    elif taper_name == 'hanning':
        taper = np.hanning(length)
    elif 'kaiser' in taper_name:
        taper = np.kaiser(length, beta=14)
    # Should never happen.
    else:
        msg = 'Something went wrong.'
        raise Exception(msg)
    # Detrend the data.
    data = detrend(data)
    # Apply the taper.
    data *= taper
    spec = abs(np.fft.rfft(data)) ** 2
    return spec, freq
def smooth(input_data, nth_octave=6, window_type='hamming'):
    """
    Smooth input data over 1/n octave
    """
    f_min = 30
    f_max = 20e3
    number_of_octaves = math.log(f_max / f_min, 2)
    # ideally, this should be computed from the display resolution
    number_of_points = 4048
    points_per_octave = number_of_points / number_of_octaves

    log_data = _distribute_over_log(input_data, f_min, f_max, number_of_points)

    # the window length must be an integer number of samples
    window_length = int(points_per_octave / nth_octave)
    if window_type == 'hamming':
        window = np.hamming(window_length)
    elif window_type == 'bartlett':
        window = np.bartlett(window_length)
    elif window_type == 'blackman':
        window = np.blackman(window_length)
    elif window_type == 'hanning':
        window = np.hanning(window_length)

    output = np.convolve(window / window.sum(), log_data, mode='same')
    return output
def create_window(size, window_id="blackman", param=None):
    """
    Create a new window numpy array

    param is only used for some windows.
    window_id can also be a function/functor which is used to create the window.

    >>> create_window(500, "blackman")
    ...  # NumPy array of size 500
    >>> create_window(500, myfunc, param=3.5)
    ...  # result of calling myfunc(500, 3.5)
    """
    if window_id == "blackman":
        return np.blackman(size)
    elif window_id == "bartlett":
        return np.bartlett(size)
    elif window_id == "hamming":
        return np.hamming(size)
    elif window_id == "hanning":
        return np.hanning(size)
    elif window_id == "kaiser":
        return np.kaiser(size, 2.0 if param is None else param)
    elif window_id in ["ones", "none"]:
        return np.ones(size)
    elif callable(window_id):
        return window_id(size, param)
    else:
        raise ValueError(f"Unknown window {window_id}")
def windowing(input, window_type, axis=0):
    """Window the input based on given window type.

    Args:
        input: input numpy array to be windowed.
        window_type: enum chosen between Bartlett, Blackman, Hamming, Hanning
            and Kaiser.
        axis: the axis along which the windowing will be applied.

    Returns:
        The windowed numpy array.
    """
    window_length = input.shape[axis]
    if window_type == Window.BARTLETT:
        window = np.bartlett(window_length)
    elif window_type == Window.BLACKMAN:
        window = np.blackman(window_length)
    elif window_type == Window.HAMMING:
        window = np.hamming(window_length)
    elif window_type == Window.HANNING:
        window = np.hanning(window_length)
    else:
        raise ValueError("The specified window is not supported!!!")

    output = input * window
    return output
def get_window(window, wlen):
    if type(window) == str:
        # types:
        # boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
        # bohman, blackmanharris, nuttall, barthann
        if window == 'hamming':
            fft_window = np.hamming(wlen)
        elif window == 'bartlett':
            fft_window = np.bartlett(wlen)
        elif window == 'hann' or window == 'hanning':
            fft_window = np.hanning(wlen)
        else:
            # try:
            #     # scipy.signal.get_window gives non-symmetric results for hamming with even length :(
            #     fft_window = scipy.signal.get_window(window, wlen)
            # except:
            #     raise Exception('cannot obtain window type {}'.format(window))
            raise Exception('cannot obtain window type {}'.format(window))
        # fft_window = scipy.signal.hamming(win_length, sym=False)
    elif six.callable(window):
        # User supplied a windowing function
        fft_window = window(wlen)
    else:
        # User supplied a window vector.
        # Make it into an array
        fft_window = np.asarray(window)

    assert len(fft_window) == wlen
    # flatten() returns a copy, so keep the result
    fft_window = fft_window.flatten()
    return fft_window
def window(self, window):
    window_size = self._window_packetsize
    buffer = allocate(shape=(window_size,), dtype=np.int32)
    self._window_address = buffer.device_address
    if window == 'rectangular':
        buffer[:] = np.int32(np.ones(window_size)[:] * 2**14)
    elif window == 'bartlett':
        buffer[:] = np.int32(np.bartlett(window_size)[:] * 2**14)
    elif window == 'blackman':
        buffer[:] = np.int32(np.blackman(window_size)[:] * 2**14)
    elif window == 'hamming':
        buffer[:] = np.int32(np.hamming(window_size)[:] * 2**14)
    elif window == 'hanning':
        buffer[:] = np.int32(np.hanning(window_size)[:] * 2**14)
    else:
        buffer[:] = np.int32(np.ones(window_size)[:] * 2**14)
        window = 'rectangular'
    self._window_transfer = 1
    while not self.window_ready:
        pass
    self._window_transfer = 0
    self._window_type = window
    self._window_squaresum = np.sum((np.array(buffer, dtype=np.single) * 2**-14)**2)
    self._window_sum = np.sum(np.array(buffer, dtype=np.single))
    buffer.freebuffer()
    self._spectrum_typescale = \
        int(struct.unpack('!i', struct.pack('!f', float(
            (self._sample_frequency / self._decimation_factor) / self._number_samples)))[0])
    self._spectrum_powerscale = \
        int(struct.unpack('!i', struct.pack('!f', float(
            1 / ((self._sample_frequency / self._decimation_factor) * self._window_squaresum))))[0])
def generate_window(fs, wave_int, duration, apply_start, apply_end, window_function):
    """
    :param fs: number of samples per second (standard)
    :param wave_int: base sound where the window will be applied
    :param duration: duration of the window (it will be the same on the start and end)
    :param apply_start: True if the window should be created at the start, False otherwise.
    :param apply_end: True if the window should be created at the end, False otherwise.
    :param window_function: window function to be generated. Possible values accepted:
        'Hanning', 'Hamming', 'Blackman', 'Bartlett'. It will revert to 'Hanning' if an
        unknown option is given.
    :return: Returns the modified sound with the window applied to it.
    """
    len_fade = int(duration * fs)
    if window_function == 'Hanning':
        fade_io = np.hanning(len_fade * 2)
    elif window_function == 'Hamming':
        fade_io = np.hamming(len_fade * 2)
    elif window_function == 'Blackman':
        fade_io = np.blackman(len_fade * 2)
    elif window_function == 'Bartlett':
        fade_io = np.bartlett(len_fade * 2)
    else:  # default
        fade_io = np.hanning(len_fade * 2)
    fadein = fade_io[:len_fade]
    fadeout = fade_io[len_fade:]
    win = np.ones(len(wave_int))
    if apply_start:
        win[:len_fade] = fadein
    if apply_end:
        win[-len_fade:] = fadeout
    wave_int = wave_int * win
    return wave_int
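# A minimal usage sketch for generate_window() above (hypothetical parameters; assumes
# numpy is imported as np): apply a 10 ms Bartlett fade-in and fade-out to a
# one-second 440 Hz tone sampled at 44.1 kHz.
fs = 44100
tone = np.sin(2 * np.pi * 440 * np.arange(fs) / fs)
faded = generate_window(fs, tone, duration=0.01, apply_start=True,
                        apply_end=True, window_function='Bartlett')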
def plotWindowFunc():
    plt.clf()
    plt.plot(np.arange(20), np.kaiser(20, 3.5))
    plt.plot(np.arange(20), np.bartlett(20))
    plt.plot(np.arange(20), np.blackman(20))
    plt.plot(np.arange(20), np.hamming(20))
    plt.plot(np.arange(20), np.hanning(20))
def mdist_curves(mdist_real):
    """
    fit curve comparing median location error to the actual location error
    """
    CHUNKS = 10
    dists_ = np.array(list(mdist_real))
    dists = dists_[dists_[:, 0].argsort()]
    dist_count = dists.shape[0]
    # cut off the start to make even chunks
    chunks = np.split(dists[(dist_count % CHUNKS):, :], CHUNKS)
    bins = utils.dist_bins(30)

    for index, chunk in enumerate(chunks):
        row = chunk[:, 1]
        hist, b = np.histogram(row, bins)
        bin_mids = (b[:-1] + b[1:]) / 2
        bin_areas = np.pi * (b[1:]**2 - b[:-1]**2)
        scaled_hist = hist / (bin_areas * len(row))
        window = np.bartlett(5)
        smooth_hist = np.convolve(scaled_hist, window, mode='same') / sum(window)

        coeffs = np.polyfit(
            np.log(bin_mids[1:121]),
            np.log(smooth_hist[1:121]),
            3)

        yield dict(
            coeffs=list(coeffs),
            cutoff=0 if index == 0 else chunk[0, 0],
            local=scaled_hist[0],
        )
def shortTermEny(signal, framelen, stride, fs, window='hamming'):
    """
    :param signal: raw signal of waveform, unit: μV
    :param framelen: length of per frame, type: int
    :param stride: length of translation per frame
    :param fs: sampling rate per microsecond
    :param window: window's function
    :return: time_stE, stE
    """
    if signal.shape[0] <= framelen:
        nf = 1
    else:
        nf = int(np.ceil((1.0 * signal.shape[0] - framelen + stride) / stride))
    pad_length = int((nf - 1) * stride + framelen)
    zeros = np.zeros((pad_length - signal.shape[0],))
    pad_signal = np.concatenate((signal, zeros))
    indices = (np.tile(np.arange(0, framelen), (nf, 1)) +
               np.tile(np.arange(0, nf * stride, stride), (framelen, 1)).T.astype(np.int32))
    frames = pad_signal[indices]
    allWindows = {'hamming': np.hamming(framelen),
                  'hanning': np.hanning(framelen),
                  'blackman': np.blackman(framelen),
                  'bartlett': np.bartlett(framelen)}
    t = np.arange(0, nf) * (stride * 1.0 / fs)
    eny, amp = np.zeros(nf), np.zeros(nf)
    try:
        windows = allWindows[window]
    except KeyError:
        print("Please select window's function from: hamming, hanning, blackman and bartlett.")
        return t, eny, amp

    for i in range(0, nf):
        b = np.square(frames[i:i + 1][0]) * windows * 1.0 / fs
        eny[i] = np.sum(b)
        amp[i] = max(abs(frames[i:i + 1][0] * windows))
    return t, eny, amp
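# A minimal usage sketch for shortTermEny() above (synthetic data; assumes numpy is
# imported as np): short-term energy and peak amplitude of an exponentially decaying
# burst, framed with 64 samples per frame, a 32-sample stride and a Bartlett window.
# fs is expressed in samples per microsecond, as documented.
fs = 10  # e.g. 10 MHz
n = np.arange(2048)
burst = np.exp(-n / 500.0) * np.sin(2 * np.pi * 0.05 * n)
t, eny, amp = shortTermEny(burst, framelen=64, stride=32, fs=fs, window='bartlett')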
def echo_decode(marked_audio: Audio, key: dict = None) -> bytes:
    np.fft.fft = pyfftw.interfaces.numpy_fft.fft
    np.fft.ifft = pyfftw.interfaces.numpy_fft.ifft
    pyfftw.interfaces.cache.enable()
    pyfftw.interfaces.cache.set_keepalive_time(1.0)

    m = key['m']
    fragment_len = key['fragment_len']
    bits_len = key['bits_len']
    encoded_len = fragment_len * bits_len
    samples_width = marked_audio.sample_width
    marked_samples_reg = marked_audio.get_reshaped_samples()
    channel0_samples_reg = np.reshape(marked_samples_reg[0, :encoded_len],
                                      (fragment_len, bits_len), 'F')
    bits = []
    for i in range(bits_len):
        # rcep is the real cepstrum of the Bartlett-windowed fragment
        rcep = np.real(
            np.fft.ifft(
                np.log(
                    np.abs(
                        np.fft.fft(
                            np.multiply(channel0_samples_reg[:, i],
                                        np.bartlett(fragment_len)))))))
        # compare the cepstral peaks at the two echo delays to recover the embedded bit
        if rcep[m[0]] >= rcep[m[1]]:
            bits.append(0)
        else:
            bits.append(1)
    try:
        decoded_bytes = get_all_bytes(bits)
    except IndexError:
        raise WrongKeyError("Invalid key.")
    return decoded_bytes
def estimate_unbnd_conc_in_region(
        motif, score_cov, atacseq_cov, chipseq_rd_cov,
        frag_len, max_chemical_affinity_change):
    # trim the read coverage to account for the motif length
    trimmed_atacseq_cov = atacseq_cov[len(motif) + 1:]
    chipseq_rd_cov = chipseq_rd_cov[len(motif) + 1:]

    # normalize the atacseq read coverage
    atacseq_weights = trimmed_atacseq_cov / trimmed_atacseq_cov.max()

    # build the smoothing window
    sm_window = np.ones(frag_len, dtype=float) / frag_len
    sm_window = np.bartlett(2 * frag_len)
    sm_window = sm_window / sm_window.sum()

    def build_occ(log_tf_conc):
        raw_occ = logistic(log_tf_conc + score_cov / (R * T))
        occ = raw_occ * atacseq_weights
        smoothed_occ = np.convolve(sm_window, occ / occ.sum(), mode='same')
        return raw_occ, occ, smoothed_occ

    def calc_lhd(log_tf_conc):
        raw_occ, occ, smoothed_occ = build_occ(-log_tf_conc)
        # diff = (100*smoothed_occ - 100*rd_cov/rd_cov.sum())**2
        lhd = -(np.log(smoothed_occ + 1e-12) * chipseq_rd_cov).sum()
        # print(log_tf_conc, diff.sum())
        return lhd

    res = brute(calc_lhd, ranges=(
        slice(0, max_chemical_affinity_change, 1.0),))[0]
    log_tf_conc = max(0, min(max_chemical_affinity_change, res))

    return -log_tf_conc
def get_boardsize_by_fft(zoomed_img):
    CLIP = 5000
    width = zoomed_img.shape[1]
    # 1D fft per row, magnitude per row, average them all into a 1D array, clip
    magspec_clip = np.clip(
        np.average(np.abs(np.fft.fftshift(np.fft.fft(zoomed_img))), axis=0),
        0, CLIP)
    # Smooth it
    smooth_magspec = np.convolve(magspec_clip, np.bartlett(7), 'same')
    if not len(smooth_magspec) % 2:
        smooth_magspec = np.append(smooth_magspec, 0.0)
    # The first frequency peak above 9 should be close to the board size.
    half = len(smooth_magspec) // 2
    plt.subplot(111)
    plt.plot(range(-half, half + 1), smooth_magspec)
    plt.show()
    MINSZ = 9
    highf = smooth_magspec[width // 2 + MINSZ:]
    maxes = scipy.signal.argrelextrema(highf, np.greater)[0] + MINSZ
    res = maxes[0] if len(maxes) else 0
    print(res)
    if res > 19:
        res = 19
    # elif res > 13: res = 13
    else:
        res = 9
    return res
def blakmanTukey(signal, M=0, win="Bartlett", n1=0, n2=0, ax=0):
    if n1 == 0 and n2 == 0:  # by default, use the whole signal
        n1 = 0
        n2 = len(signal)
    N = n2 - n1
    if M == 0:
        M = int(N / 5)
    M = 2 * M - 1
    if M > N:
        raise ValueError('Window cannot be longer than data')

    if win == "Bartlett":
        w = np.bartlett(M)
    elif win == "Hanning":
        w = np.hanning(M)
    elif win == "Hamming":
        w = np.hamming(M)
    elif win == "Blackman":
        w = np.blackman(M)
    elif win == "Flattop":
        w = sg.flattop(M)
    else:
        w = sg.boxcar(M)

    r, lags = acorrBiased(signal)
    r = r[np.logical_and(lags >= 0, lags < M)]
    rw = r * w
    Px = 2 * fft(rw).real - rw[0]
    return Px
def __init__(self):
    self._glue_app = gj.jglue()
    self._history = []

    self._3d_data = {}
    self._2d_data = {}
    self._1d_data = {}

    self._3d_processing = {}
    self.add_3d_processing("Median Collapse over Wavelenths", np.nanmedian, 'a', (('axis', 0),))
    self.add_3d_processing("Mean Collapse over Wavelenths", np.nanmean, 'a', (('axis', 0),))
    self.add_3d_processing("Median Collapse over Space", np.nanmedian, 'a', (('axis', (1, 2)),))
    self.add_3d_processing("Mean Collapse over Space", np.nanmean, 'a', (('axis', (1, 2)),))

    self._2d_processing = {}

    self._1d_processing = {}
    self.add_1d_processing("Median Smoothing", scipy.signal.medfilt, 'volume', (('kernel_size', 3),))
    self.add_1d_processing("Hanning Smoothing", np.convolve, 'a', (('mode', 'valid'), ('w', np.hanning(3))))
    self.add_1d_processing("Hamming Smoothing", np.convolve, 'a', (('mode', 'valid'), ('w', np.hamming(3))))
    self.add_1d_processing("Bartlett Smoothing", np.convolve, 'a', (('mode', 'valid'), ('w', np.bartlett(3))))
    self.add_1d_processing("Blackman Smoothing", np.convolve, 'a', (('mode', 'valid'), ('w', np.blackman(3))))
def addWindow(points):
    # use a window function on the data to make the data fit
    # TODO choose a better window function
    windowFunction = np.bartlett(len(points))
    for i, windowFactor in enumerate(windowFunction):
        points[i] = points[i] * windowFactor
    return points
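# The element-wise loop in addWindow() can also be written as a single vectorized
# multiplication (assuming numpy is imported as np, as above); this sketch gives the
# same values but returns a new array instead of modifying points in place.
def add_window_vectorized(points):
    return points * np.bartlett(len(points))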
def apply_window(sinogram):
    window = np.bartlett(len(sinogram))
    for i in range(len(sinogram[0])):
        fft = np.fft.fft(sinogram[:, i])
        fft = fft * window
        ifft = np.real(np.fft.ifft(fft))
        sinogram[:, i] = ifft
    return sinogram
def nonlinear(x):
    """
    Nonlinear energy operator for spike detection
    """
    xo = np.int32(x)
    y = [xo[n]**2 + xo[n - 1] * xo[n + 1] for n in range(1, len(x) - 1)]
    window = np.bartlett(12)
    return np.convolve(y, window)
def get_windows():
    bartlett = np.bartlett(M)
    rectangular = np.ones(shape=M)
    blackman = np.blackman(M)
    hamming = np.hamming(M)
    hanning = np.hanning(M)
    return (bartlett, rectangular, blackman, hamming, hanning)
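# A minimal usage sketch for get_windows() above (assumes a module-level M, e.g. 64,
# and that matplotlib.pyplot is imported as plt): overlay the five windows for a
# quick visual comparison.
M = 64
names = ("bartlett", "rectangular", "blackman", "hamming", "hanning")
for name, win in zip(names, get_windows()):
    plt.plot(win, label=name)
plt.legend()
plt.show()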
def SelectWindowsFun(self, index):
    # global window
    x = {0: np.ones(self.CHUNK),
         1: np.hamming(self.CHUNK),
         2: np.hanning(self.CHUNK),
         3: np.bartlett(self.CHUNK),
         4: np.blackman(self.CHUNK)}
    self.window = x[index]
def Conditioned_spec(self):
    datafile = str(self.lineEdit.text())
    t, Q, I = np.loadtxt(datafile)
    # I = I/np.max(I)
    # Q = Q/np.max(Q)
    fs = np.round(1 / (t[1] - t[0]), -3)
    # fs = np.round(10e6, -3)
    phase = np.unwrap(np.arctan2(Q, I))

    # Original signal using I and Q data
    signal = I + 1j * Q
    n = len(I)
    # signal = signal - np.mean(signal)

    # Filter: high and low
    order = float(self.lineEdit_2.text())
    cutoff = float(self.lineEdit_3.text())
    if str(self.comboBox.currentText()) == 'Signal - LowPass Filtered Signal':
        b, a = butter(order, 2 * cutoff / fs, 'low')
        signal_low = filtfilt(b, a, signal)
        filt_signal = signal - signal_low
    if str(self.comboBox.currentText()) == 'HighPass Filtered Signal':
        b, a = butter(order, 2 * cutoff / fs, 'high')
        filt_signal = filtfilt(b, a, signal)

    order = float(self.lineEdit_4.text())
    fcutlow = float(self.lineEdit_5.text())
    fcuthigh = float(self.lineEdit_6.text())
    # band edges must be normalized element-wise (a plain list cannot be divided by fs)
    b, a = butter(order, [2 * fcutlow / fs, 2 * fcuthigh / fs], 'bandpass')
    cond_signal = filtfilt(b, a, filt_signal)

    # Spectrogram
    self.Status_Bar('Do not press another button before closing the graph')
    fft_pts = int(self.lineEdit_11.text())
    overlap_pts = int(self.lineEdit_12.text())
    pad = int(self.lineEdit_13.text())
    if str(self.comboBox_2.currentText()) == 'Hanning':
        windfn = np.hanning(fft_pts)
    if str(self.comboBox_2.currentText()) == 'Blackman':
        windfn = np.blackman(fft_pts)
    if str(self.comboBox_2.currentText()) == 'Hamming':
        windfn = np.hamming(fft_pts)
    if str(self.comboBox_2.currentText()) == 'Bartlett':
        windfn = np.bartlett(fft_pts)
    xmin = self.isnum(str(self.lineEdit_14.text()))
    xmax = self.isnum(str(self.lineEdit_15.text()))
    ymin = self.isnum(str(self.lineEdit_16.text()))
    ymax = self.isnum(str(self.lineEdit_17.text()))

    plt.figure()
    plt.specgram(cond_signal, NFFT=fft_pts, Fs=fs, window=windfn,
                 noverlap=overlap_pts, pad_to=pad, scale='linear')
    if (xmin != -1) and (xmax != -1):
        plt.xlim(float(xmin), float(xmax))
    if (ymin != -1) and (ymax != -1):
        plt.ylim(float(ymin), float(ymax))
    plt.colorbar()
    plt.grid()
    plt.show()
    self.Status_Bar('Another button can be pressed now')
def PowerSpectrum2(im, win=2, n1=1, n2=0):
    """2D spectrum estimation using the modified periodogram.

    This one includes a window function to decrease variance in the estimate.

    :param im: input image
    :type im: numpy.array
    :param n1: starting index, x(n1)
    :type n1: int
    :param n2: ending index, x(n2)
    :type n2: int
    :param win: The window type:
        1 = Rectangular,
        2 = Hamming,
        3 = Hanning,
        4 = Bartlett,
        5 = Blackman
    :type win: int

    :returns: spectrum estimate.
    :rtype: numpy.array

    .. note:: If n1 and n2 are not specified the periodogram of the entire
        sequence is computed.
    """
    if n2 == 0:
        n2 = len(im[:, 1])
    N = n2 - n1 + 1
    w = np.ones((N))
    if win == 2:
        w = np.hamming(N)
    elif win == 3:
        w = np.hanning(N)
    elif win == 4:
        w = np.bartlett(N)
    elif win == 5:
        w = np.blackman(N)

    xs, ys = im.shape
    if xs / ys != 1:
        raise ValueError('Dimensions must be equal')

    m = w[:] * w[:][np.newaxis, :]
    U = np.linalg.norm(w)**2.0 / N**2.0
    fftim = np.abs(np.fft.fftshift(np.fft.fft2(im * m))) / ((N**2.0) * U)
    return fftim
def fid_to_spec(self, window_f=' ', beta=2, t_start=0., t_end=100.0):
    '''
    FFT with zero-padding of the FID. Saves the result in self.spec

    Parameters
    ----------
    window_f: is the applied window function. The following window functions are
        applicable: hanning, hamming, blackman, bartlett, kaiser
        (beta (float) is only used by the kaiser window)
    t_start (float): specifies the beginning of the FID. The corresponding data
        points will be cut away.
    t_end (float): specifies the end of the FID. The corresponding data points
        will be cut away.

    Returns
    -------
    none

    Notes
    -----
    none
    '''
    if len(self.fid) != 0:
        fid = slice_fid(self.fid, t_start, t_end)
        window_f = window_f.lower()

        # Choose the window function (default: rect / none)
        if window_f == 'hanning':
            fid[:, 1] = fid[:, 1] * np.hanning(len(fid))
        elif window_f == 'hamming':
            fid[:, 1] = fid[:, 1] * np.hamming(len(fid))
        elif window_f == 'blackman':
            fid[:, 1] = fid[:, 1] * np.blackman(len(fid))
        elif window_f == 'bartlett':
            fid[:, 1] = fid[:, 1] * np.bartlett(len(fid))
        elif window_f == 'kaiser':
            fid[:, 1] = fid[:, 1] * np.kaiser(len(fid), beta)

        # Zero padding to n**2 length to enhance computing speed
        h = (int(np.sqrt(len(fid))) + 1) ** 2

        spec = np.absolute(np.fft.rfft(fid[:, 1], h)) ** 2 / h
        spec = spec[0:int(h / 2)]

        freq = np.fft.fftfreq(h, np.abs(fid[2, 0] - fid[1, 0])) / 1.E6
        freq = freq[0:int(h / 2)]

        # Combine FFT and frequencies
        self.spec = np.column_stack([freq, spec])

        self.spec_params['npoints'] = len(spec)
        self.spec_params['max_f'] = np.max(freq)
        self.spec_params['min_f'] = np.min(freq)
        self.spec_params['delta_f'] = np.abs(freq[1] - freq[0])
def FT_old(f, step=1.0, lag=2000, window=2, nzeros=10000):
    """My own interface to scipy FFT
    f: function
    step: time step in fs
    window: selects which window is applied"""
    import numpy as np
    from scipy.fftpack import fft, dct
    from tools import units

    print('')
    print('@FT: PLEASE CLEAN ME')
    print('')

    n = lag
    if window == 1:
        win = np.blackman(2 * n)
    elif window == 2:
        win = np.hanning(2 * n)
    elif window == 3:
        win = np.hamming(2 * n)
    elif window == 4:
        win = np.bartlett(2 * n)
    else:
        win = np.ones(2 * n)

    # Apply window
    fw = np.multiply(f[:n], win[n:])

    # Add zeros
    print('test1', fw.shape)
    fw = np.concatenate((fw, np.zeros(nzeros)))
    print('test2', fw.shape)

    # Apply an average
    # m = n // av
    # fww = np.mean(fw.reshape(m,av),axis=1)

    # Compute FT considering an even function of time
    ft = np.fft.hfft(fw)
    ft = dct(fw, type=1)  # It is the same as hfft
    # Compute FT NOT considering an even function of time and taking abs
    # ff = np.abs(fft(f))

    # Add units
    FT = np.zeros((2, n + nzeros))
    print('Unit conversion {}'.format(1. / units.convert.cm2Phz))
    FT[0] = np.linspace(0, 1. / (2. * float(step)), n + nzeros) / units.convert.cm2Phz
    FT[1] = ft.copy()
    ###FT=np.zeros((2,m))
    ###FT[0]=np.linspace(0,1./(2*float(step*av)),m)/ units.convert.cm2Phz
    ###FT[1]=ft[:m]
    return FT.T
def smooth_1d_boundaries(input_signal: np.ndarray,
                         window_len: int = 51,
                         window_type: str = 'flat',
                         mode: str = 'mirror',
                         mean_for_short_signals: bool = False) -> np.ndarray:
    """
    Applies a smoothing function to a rolling window for 1d signals using
    convolution. This function is generally faster than
    apply_rolling_fun_1d_boundaries.

    :param input_signal: the 1d input signal.
    :param window_len: window length in number of samples.
    :param window_type: the window type, valid selectors:
        'flat': a convolution operator with ones for a standard average smoothing function
        'hanning': a hanning window operator
        'hamming': a hamming window operator
        'bartlett': a bartlett window operator
        'blackman': a blackman window operator
    :param mode: valid selectors:
        'valid': no padding is applied; the output signal starts window_len/2 after
            the signal start and ends window_len/2 before the signal end
        'same': the output signal has the same length as the input signal. The input
            signal is padded with zeros at the start and end to achieve this.
        'mirror': the output signal has the same length as the input signal. This is
            achieved by mirroring the input signal at its beginning and end.
    :param mean_for_short_signals: return the arithmetic mean if the input signal is
        shorter than the window length.
    :return: the resulting 1d output signal.
    """
    if window_len % 2 == 0:
        raise ValueError("window length needs to be odd")

    if input_signal.shape[0] < window_len:
        if mean_for_short_signals:
            return np.mean(input_signal)
        raise ValueError("the signal needs to be longer than the window")

    # moving average
    if window_type == 'flat':
        c_filter = np.ones(window_len, dtype='float32')
    elif window_type == 'hanning':
        c_filter = np.hanning(window_len)
    elif window_type == 'hamming':
        c_filter = np.hamming(window_len)
    elif window_type == 'bartlett':
        c_filter = np.bartlett(window_len)
    elif window_type == 'blackman':
        c_filter = np.blackman(window_len)
    else:
        raise ValueError("unknown window type")

    c_filter = c_filter / c_filter.sum()
    filtered_signal = convolve_1d_boundaries(input_signal, c_filter, mode=mode)
    return filtered_signal
def SelectWindowsFun(self, index):
    global window
    if index == 0:
        window = np.ones(CHUNK)
    elif index == 1:
        window = np.hamming(CHUNK)
    elif index == 2:
        window = np.hanning(CHUNK)
    elif index == 3:
        window = np.bartlett(CHUNK)
    elif index == 4:
        window = np.blackman(CHUNK)
def changeWF(self, s):
    if s == 'boxcar':
        self.window = np.ones(self.nSamples)
    elif s == 'hamming':
        self.window = np.hamming(self.nSamples)
    elif s == 'blackman':
        self.window = np.blackman(self.nSamples)
    elif s == 'bartlett':
        self.window = np.bartlett(self.nSamples)
    elif s == 'hanning':
        self.window = np.hanning(self.nSamples)
    self.restartAvg()
def applyWindow(self, samples, window='hanning'):
    if window == 'bartlett':
        return samples * np.bartlett(len(samples))
    elif window == 'blackman':
        return samples * np.blackman(len(samples))
    elif window == 'hamming':
        return samples * np.hamming(len(samples))
    elif window == 'kaiser':
        # np.kaiser requires a beta shape parameter; 14 is an assumed common default
        return samples * np.kaiser(len(samples), 14)
    else:
        return samples * np.hanning(len(samples))
def removeGating(self, view=False):
    self.info("Removing gating gain (gating freq.: %.2f Hz)..." % self.gatefreq)
    N = int(1.0 / (self.gatefreq * self.deltaT))  # samples for one gating gain cycle
    maxN = int(self.Nfft / 2)                     # max number of samples
    gategain = np.ones((maxN, 1))                 # vector with length=Nfft/2
    n = int(np.floor(maxN / N))                   # how many gating cycles are included in time window
    rest = maxN - (n * N)                         # how many samples are left after n full cycles?
    for i in range(n):
        gategain[i * N:(i + 1) * N, 0] = np.bartlett(N)  # creates one triangle for full cycle
    gategain[n * N:, 0] = np.bartlett(N)[:rest]          # creates part of triangle for incomplete cycle
    self.gategain = 1.0 / gategain[self.samplendx]
    if view:
        from pylab import show, plot, xlabel, ylabel
        plot(self.t[:len(self.samplendx)], 10 * np.log10(1.0 / gategain[self.samplendx]))
        xlabel("Time [ns]")
        ylabel("Gating Gain [dB]")
        show()
    self.data = self.data * self.gategain
    self.done()
def get_window(self, n=None):
    if not n:
        n = self.lframes
    assert self.window in ['rectangular', 'bartlett', 'blackman', 'hamming', 'hanning']
    if self.window == 'rectangular':
        return np.ones(n)
    elif self.window == 'bartlett':
        return np.bartlett(n)
    elif self.window == 'blackman':
        return np.blackman(n)
    elif self.window == 'hamming':
        return np.hamming(n)
    else:
        return np.hanning(n)
def bartlett1d(data, M, **kwargs):
    """Bartlett 1D filter

    :Params:

        - **data**: A :mod:`MV2` variable.
        - **M**: Size of the Bartlett window.
        - Keywords are passed to :func:`generic1d`.

    :Return:

        - A :mod:`MV2` variable
    """
    weights = N.bartlett(M).astype(data.dtype.char)
    return generic1d(data, weights, **kwargs)
def pickWinType(winType, N):
    """Allow the user to pick a window type"""
    # Select window type (compare strings with ==, not with identity)
    if winType == "bartlett":
        window = np.bartlett(N)
    elif winType == "blackman":
        window = np.blackman(N)
    elif winType == "hamming":
        window = np.hamming(N)
    elif winType == "hanning":
        window = np.hanning(N)
    else:
        window = None
    return window
def smooth_1D(arr, n=10, smooth_type="flat") -> np.ndarray:
    """Smooth 1D data using a window function.

    Edge effects will be present.

    Parameters
    ----------
    arr : array_like
        Input array, 1D.
    n : int (optional)
        Window length.
    smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional)
        Type of window function to convolve data with.
        'flat' window will produce a moving average smoothing.

    Returns
    -------
    array_like
        Smoothed 1D array.
    """
    # check array input
    if arr.ndim != 1:
        raise wt_exceptions.DimensionalityError(1, arr.ndim)
    if arr.size < n:
        message = "Input array size must be larger than window size."
        raise wt_exceptions.ValueError(message)
    if n < 3:
        return arr
    # construct window array
    if smooth_type == "flat":
        w = np.ones(n, dtype=arr.dtype)
    elif smooth_type == "hanning":
        w = np.hanning(n)
    elif smooth_type == "hamming":
        w = np.hamming(n)
    elif smooth_type == "bartlett":
        w = np.bartlett(n)
    elif smooth_type == "blackman":
        w = np.blackman(n)
    else:
        message = "Given smooth_type, {0}, not available.".format(str(smooth_type))
        raise wt_exceptions.ValueError(message)
    # convolve reflected array with window function
    out = np.convolve(w / w.sum(), arr, mode="same")
    return out
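# A minimal usage sketch for smooth_1D() above (hypothetical data; assumes numpy is
# imported as np): Bartlett smoothing of a noisy ramp. np.convolve with mode="same"
# preserves the input length, so edge effects appear only near the ends.
arr = np.linspace(0, 1, 200) + 0.05 * np.random.randn(200)
smoothed = smooth_1D(arr, n=11, smooth_type="bartlett")
print(smoothed.shape)  # (200,)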
def graph_vect_fit(vect_fit, in_paths, env):
    """graph four example pContact curves and all the curves of best fit"""
    if in_paths[0][-1] != '0':
        return
    ratios = (ratio for vers, cutoff, ratio in env.load('vect_ratios.0') if vers == 'leaf')
    fits = (fit for vers, cutoff, fit in vect_fit if vers == 'leaf')
    bins = dist_bins(120)
    miles = np.sqrt([bins[x - 1] * bins[x] for x in range(2, 482)])

    with axes('vect_fit', legend_loc=1) as ax:
        ax.set_xlim(1, 10000)
        ax.set_ylim(1e-8, 1e-3)
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel('distance in miles')
        ax.set_ylabel('probability of being a contact')
        colors = iter([FL_PURP, FL_BLUE, FL_GREEN, 'k'])
        labels = iter([
            'edges predicted in nearest 10%',
            'edges in 60th to 70th percentile',
            'edges in 30th to 40th percentile',
            'edges predicted in most distant 10%',
        ])
        for index, (ratio, fit) in enumerate(zip(ratios, fits)):
            if index % 3 == 0:
                color = next(colors)
                label = next(labels)
                fitstyle = 'dashed'
            else:
                color = ".6"
                label = None
                fitstyle = 'dotted'

            window = np.bartlett(5)
            smooth_ratio = np.convolve(ratio, window, mode='same') / sum(window)

            if label:
                ax.plot(miles, smooth_ratio, '-', color=color, label=label, linewidth=2)
            ax.plot(miles, peek.contact_curve(miles, *fit), '-',
                    linewidth=2, linestyle=fitstyle, color=color)
def pitch_shifter(mono, pitch, time):
    sigout = np.array(mono, dtype='f4')
    size = time                             # delay time in samples
    delay = np.zeros((size,), dtype='f4')   # delay line
    env = np.bartlett(size)                 # fade envelope table
    tap1, tap2, wp = 0, size // 2, 0        # read taps and write pointer
    for i in range(len(mono)):
        delay[wp] = sigout[i]               # fill the delay line at the write pointer
        # first tap, linear interp readout
        frac = tap1 - int(tap1)
        if int(tap1) < size - 1:
            delaynext = delay[int(tap1) + 1]
        else:
            delaynext = delay[0]            # wrap back to the beginning
        sig1 = delay[int(tap1)] + frac * (delaynext - delay[int(tap1)])
        # second tap, linear interp readout
        frac = tap2 - int(tap2)
        if int(tap2) < size - 1:
            delaynext = delay[int(tap2) + 1]
        else:
            delaynext = delay[0]
        sig2 = delay[int(tap2)] + frac * (delaynext - delay[int(tap2)])
        # fade envelope positions (integer offsets into the envelope table)
        ep1 = int(tap1) - wp
        if ep1 < 0:
            ep1 += size
        ep2 = int(tap2) - wp
        if ep2 < 0:
            ep2 += size
        # combine tap signals
        sigout[i] = env[ep1] * sig1 + env[ep2] * sig2
        # increment tap positions according to pitch transposition
        tap1 += pitch
        tap2 = tap1 + size / 2
        # keep tap positions within the delay memory bounds
        while tap1 >= size:
            tap1 -= size
        while tap1 < 0:
            tap1 += size
        while tap2 >= size:
            tap2 -= size
        while tap2 < 0:
            tap2 += size
        # increment write position
        wp += 1
        if wp == size:
            wp = 0
    return np.array(sigout, dtype='int16')


(sr, signalin) = wavfile.read(sys.argv[2])
pitch = 2. ** (float(sys.argv[1]) / 12.)
signalout = np.zeros(len(signalin))
fund = 131.
dsize = int(sr / (fund * 0.5))
print(dsize)
# call matched to the three-argument signature defined above
signalout = pitch_shifter(signalin, pitch, dsize)
wavfile.write(sys.argv[3], sr, np.array((signalout + signalin) / 2., dtype='int16'))
def record(self, forever=True):
    """record secToRecord seconds of audio."""
    while True:
        if self.threadsDieNow:
            break
        try:
            for i in range(self.chunksToRecord):
                self.audio[i * self.BUFFERSIZE:(i + 1) * self.BUFFERSIZE] = self.getAudio()
            # self.audio *= numpy.hanning(len(self.audio))
        except IOError:
            print("dropped frames")
            self.newAudio = False
            self.dropFrames += 1
            if self.dropFrames >= 5:
                break
        else:
            self.dropFrames = 0
            self.newAudio = True
            self.audio *= numpy.bartlett(len(self.audio))
        if forever == False:
            break
def synthesize(raw_samples, beats, factor):
    array_shape = (2, raw_samples.shape[1] * 2)
    output = np.zeros(array_shape)
    offset = 0

    val = (factor - 1) / (5 * factor + 2)
    factor1 = 1 - 2 * val
    factor2 = 1 + 5 * val

    winsize = 512
    window = np.bartlett(winsize * 2 - 1)
    winsize1 = int(math.floor(winsize * factor1))
    winsize2 = int(math.floor(winsize * factor2))

    for start, end in beats:
        frame = raw_samples[:, start:end]

        # timestretch the eighth notes
        mid = int(math.floor(frame.shape[1] / 2))
        left = frame[:, :mid + winsize1]
        right = frame[:, max(0, mid - winsize2):]
        left = timestretch(left, factor1)
        right = timestretch(right, factor2)

        # taper the ends to 0 to avoid discontinuities
        left[:, :winsize] = left[:, :winsize] * window[:winsize]
        left[:, -winsize:] = left[:, -winsize:] * window[-winsize:]
        right[:, :winsize] = right[:, :winsize] * window[:winsize]
        right[:, -winsize:] = right[:, -winsize:] * window[-winsize:]

        # zero pad and add for the overlap
        overlap = sum_signals([left[:, -winsize:], right[:, :winsize]])
        frame = np.hstack([left[:, :-winsize], overlap, right[:, winsize:]])

        if offset > 0:
            overlap = sum_signals([output[:, offset - winsize:offset], frame[:, :winsize]])
            output[:, max(0, offset - winsize):offset] = overlap

        output[:, offset:(offset + frame.shape[1] - winsize)] = frame[:, winsize:]
        offset += frame.shape[1] - winsize

    output = output[:, 0:offset]
    return output
def __call__(self, inputs):
    if self.get_input('window') == 'hanning':
        from numpy import hanning
        return hanning(self.get_input('n'))
    elif self.get_input('window') == 'hamming':
        from numpy import hamming
        return hamming(self.get_input('n'))
    elif self.get_input('window') == 'bartlett':
        from numpy import bartlett
        return bartlett(self.get_input('n'))
    elif self.get_input('window') == 'blackman':
        from numpy import blackman
        return blackman(self.get_input('n'))
    elif self.get_input('window') == 'kaiser':
        from numpy import kaiser
        return kaiser(self.get_input('n'), self.get_input('beta (kaiser only)'))
    elif self.get_input('window') == 'None':
        from numpy import kaiser
        return kaiser(self.get_input('n'), 0.)
    else:
        raise ValueError("should never enter here since window values is an enum selector")
def isoluminant(rng, num_cycles=1, num_colors=256, reverse=False, **traits):
    """ Generator function for a Chaco color scale that cycles through the hues
    @num_cycles times, while maintaining monotonic luminance (i.e., if it is
    printed in black and white, then it will be perceptually equal to a linear
    grayscale).

    Ported from the Matlab(R) code from:

    McNames, J. (2006). An effective color scale for simultaneous color and
    gray-scale publications. IEEE Signal Processing Magazine 23(1), 82--87.
    """
    # Triangular window function
    window = N.sqrt(3.0) / 8.0 * N.bartlett(num_colors)

    # Independent variable
    t = N.linspace(N.sqrt(3.0), 0.0, num_colors)

    # Initial values
    operand = (t - N.sqrt(3.0) / 2.0) * num_cycles * 2.0 * N.pi / N.sqrt(3.0)
    r0 = t
    g0 = window * N.cos(operand)
    b0 = window * N.sin(operand)

    # Convert RG to polar, rotate, and convert back
    r1, g1 = _rotate(r0, g0, N.arcsin(1.0 / N.sqrt(3.0)))
    b1 = b0

    # Convert RB to polar, rotate, and convert back
    r2, b2 = _rotate(r1, b1, N.pi / 4.0)
    g2 = g1

    # Ensure finite precision effects don't exceed unit cube boundaries
    r = r2.clip(0.0, 1.0)
    g = g2.clip(0.0, 1.0)
    b = b2.clip(0.0, 1.0)

    the_map = N.vstack((r, g, b)).T
    return ColorMapper.from_palette_array(the_map[::-1 if reverse else 1],
                                          range=rng, **traits)
def window_bartlett(N):
    r"""Bartlett window (wrapping of numpy.bartlett), also known as Fejer

    :param int N: window length

    The Bartlett window is defined as

    .. math:: w(n) = \frac{2}{N-1} \left(
                  \frac{N-1}{2} - \left|n - \frac{N-1}{2}\right|
              \right)

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'bartlett')

    .. seealso:: numpy.bartlett, :func:`create_window`, :class:`Window`.
    """
    from numpy import bartlett
    return bartlett(N)
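# A small numeric check (not part of the original module) that numpy.bartlett matches
# the formula quoted in the docstring above,
# w(n) = 2/(N-1) * ((N-1)/2 - |n - (N-1)/2|):
import numpy as np
N = 65
n = np.arange(N)
w_formula = 2.0 / (N - 1) * ((N - 1) / 2.0 - np.abs(n - (N - 1) / 2.0))
assert np.allclose(w_formula, np.bartlett(N))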
def update(self):
    if self.structure:
        N_lame = self.N_lame - self.struct_N
    else:
        N_lame = self.N_lame

    damp = lambda t: 1.0 - np.exp(
        -np.abs(np.mod(t + self.period / 2, self.period) - self.period / 2) / self.damp_tau
    )

    N_periods = 1
    # np.int is removed in recent NumPy; use the builtin int instead
    i = np.mod(int(self.t / self.period * self.vague.shape[2] / N_periods), self.vague.shape[2])

    surface = np.zeros_like(self.lames[2, :N_lame])
    # for k, amp in zip([-2, -1, 0, 1, 2], [.125, .25, .5, .25, .125]):
    #     surface += amp * self.vague[self.x_offset:(self.x_offset+N_lame), self.y_offset, self.t_offset+i+k]
    surface = self.vague[self.x_offset:(self.x_offset + N_lame), self.y_offset, self.t_offset + i]
    surface = np.convolve(surface, np.arange(5), mode="same")
    dsurface = np.gradient(surface)
    dsurface *= np.bartlett(N_lame)
    # print(dsurface.mean(), dsurface.max(), damp(self.t))
    dsurface /= np.abs(dsurface).max()
    dsurface *= np.tan(np.pi / 32)  # maximum angle achieved
    self.lames[2, :N_lame] = np.arctan(dsurface) * damp(self.t)