def test_frequency(self):
    import scipy.signal as ss

    nsteps = 10000
    dt = 0.1

    t, w = self.regular_integrator.run(self.regular_w0, dt=dt, nsteps=nsteps)
    f, fft = fft_orbit(t, w)

    peak_ix = ss.find_peaks_cwt(fft[:, 0], widths=np.linspace(dt*2, dt*100, 10))
    print(peak_ix)

    plt.clf()
    plt.axvline(self.regular_par[1]/(2*np.pi), linewidth=3., alpha=0.35, color='b')
    plt.axvline(1/(2*np.pi), linewidth=3., alpha=0.35, color='r')
    plt.semilogx(f[:, 0], fft[:, 0], marker=None)
    plt.savefig(os.path.join(plot_path, "pend_fft_regular.png"))

    # ----------------------------------------------------------------------
    t, w = self.chaotic_integrator.run(self.chaotic_w0, dt=dt, nsteps=nsteps)
    f, fft = fft_orbit(t, w)

    peak_ix = ss.find_peaks_cwt(fft[:, 0], widths=np.linspace(dt*2, dt*100, 10))
    print(peak_ix)

    plt.clf()
    plt.axvline(self.chaotic_par[1]/(2*np.pi), linewidth=3., alpha=0.35, color='b')
    plt.axvline(1/(2*np.pi), linewidth=3., alpha=0.35, color='r')
    plt.semilogx(f[:, 0], fft[:, 0], marker=None)
    plt.savefig(os.path.join(plot_path, "pend_fft_chaotic.png"))

def fft_analysis(self, position=None):
    '''
    Performs fast Fourier analysis on the data sets and returns either the
    five most significant frequencies, or, when a specific position number
    is passed as a parameter, numpy arrays of frequencies and power spectrum.
    '''
    if position is None:
        freqs_5 = np.array([])
        for dp, pr in zip(self.data_points, self.prumery):
            time_step = (dp.data_time[-1] - dp.data_time[0]) / dp.data.size
            ps = np.abs(np.fft.rfft(dp.data - pr))**2  # Power spectrum
            freqs = np.fft.rfftfreq(dp.data.size, time_step)
            peakind = signal.find_peaks_cwt(ps, np.arange(1, 50))
            freqs_5 = np.append(freqs_5, freqs[peakind[:5]])
        # Return the frequencies accumulated over all data sets (the original
        # returned only the last set's five frequencies, leaving freqs_5 unused).
        return freqs_5
    else:
        # Returns arrays of frequencies and power spectrum plus the five most
        # significant frequencies for the requested position.
        for dp, pr in zip(self.data_points, self.prumery):
            if position == dp.position_no:
                time_step = (dp.data_time[-1] - dp.data_time[0]) / dp.data.size
                ps = np.abs(np.fft.rfft(dp.data - pr))**2  # Power spectrum
                # ps = np.angle(np.fft.rfft(dp.data - pr))  # Phase spectrum
                # ps = 20*np.log10(np.abs(np.fft.rfft(dp.data - pr)))  # Amplitude spectrum in [dB]
                freqs = np.fft.rfftfreq(dp.data.size, time_step)
                peakind = signal.find_peaks_cwt(ps, np.arange(1, 50))
                return freqs, ps, freqs[peakind[:5]]

def find_threshold(filename, base_offset=0, pos_pol=True, **kws):
    with trace_gen(filename, base_offset=base_offset, **kws) as gen:
        if pos_pol:
            min_data = [event[100:-100].max() for event in gen]
        else:
            min_data = [event[100:-100].min() for event in gen]
    hist = histogram(min_data, bins=2048)
    # second = 2 if pos_pol else -2
    # index = argrelmax(hist[0][hist[0] != 0], order=5)[0][second]
    second = 1 if pos_pol else -1
    peaks = find_peaks_cwt(hist[0][hist[0] != 0], arange(10, 40))
    try:
        index = peaks[second]
    except IndexError:
        index = peaks[0]
    return hist[1][hist[0] != 0][index] * 3 / 4, len(min_data)

def find_peaks(x, y, widthrange, rel_threshold=0.1):
    """Peak-finding in a 2d dataset.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    widthrange : tuple
        Lower and upper limit of peak widths to find.
    rel_threshold : float, optional
        Peaks with a height lower than this value times the height of
        the maximum in 'y' are ignored.

    Returns
    -------
    list
        Array indices of where the peaks were found in 'y'.

    See Also
    --------
    scipy.signal.find_peaks_cwt : Peak-finding using a continuous wavelet
        transform technique.
    """
    dx = abs(x[1] - x[0])
    minwidth, maxwidth = widthrange
    widths = np.arange(floor(minwidth/dx), ceil(maxwidth/dx))
    peakpos = find_peaks_cwt(y, widths)
    maxy = max(y)
    return [pos for pos in peakpos if y[pos] >= rel_threshold*maxy]

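# Illustrative usage of the find_peaks wrapper above on a synthetic spectrum.
# The two Gaussian peaks and the chosen width range are assumptions for this
# sketch, not values from any real data set.
import numpy as np
from math import floor, ceil
from scipy.signal import find_peaks_cwt

x = np.linspace(0, 10, 1000)
y = np.exp(-(x - 3)**2 / 0.02) + 0.5 * np.exp(-(x - 7)**2 / 0.05)
# look for peaks 0.1 to 1.0 x-units wide; drop anything under 10% of the max
peak_indices = find_peaks(x, y, widthrange=(0.1, 1.0), rel_threshold=0.1)
print(x[peak_indices])  # expect values near 3 and 7
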
def execute_analysis(self):
    """
    The continuous wavelet transform peak identification algorithm from
    scipy.signal.find_peaks_cwt(). In openmsi it is renamed "findpeaks_cwt"
    with one fewer underscore.
    """
    from scipy import signal as sig

    msidata = self['msidata']  # now in memory as hdf5 cube or np.ndarray
    widths = self['widths']    # should already be a numpy ndarray
    min_snr = self['min_snr']

    shape_x = msidata.shape[0]
    shape_y = msidata.shape[1]
    mzindices = []
    for xi in range(0, shape_x):
        for yi in range(0, shape_y):
            print(xi, yi)
            # Load the spectrum
            m = msidata[xi, yi, :]
            # find indices of m where peaks are
            peak_indices = sig.find_peaks_cwt(m, widths=widths,
                                              wavelet=sig.ricker,
                                              min_snr=min_snr)
            mzindices.append(peak_indices)
    return mzindices

def checkerboard_matrix_filtering(similarity_matrix, kernel_width, peak_range):
    """
    Moving the checkerboard matrix over the main diagonal of the similarity
    matrix one sample at a time.

    :param similarity_matrix:
    :param peak_range: the number of samples in which the peak detection
        algorithm finds a peak (TODO: clarify)
    :param kernel_width: the size of one quarter of the checkerboard matrix
    :return: peaks and convolution values
    """
    checkerboard_matrix = get_checkerboard_matrix(kernel_width)

    # The values calculated in this step start at the 'kernel_width' position
    # and end at length - kernel_width
    d = []
    for i in range(0, similarity_matrix.shape[0] - 2 * kernel_width):
        base = similarity_matrix[i:i + kernel_width * 2, i:i + kernel_width * 2]
        d.append(np.sum(np.multiply(base, checkerboard_matrix)))

    # The missing values from 0 to kernel_width are calculated here
    top_left_d = []
    for i in range(0, kernel_width):
        base = similarity_matrix[0:i + kernel_width, 0:i + kernel_width]
        top_left_d.append(np.sum(np.multiply(
            base, checkerboard_matrix[kernel_width - i:, kernel_width - i:])))

    # The missing kernel_width values at the bottom right are set to 0
    convolution_values = top_left_d + d + [0 for i in range(0, kernel_width)]

    # find_peaks_cwt returns an ndarray; convert to a list before concatenating
    peaks = list(find_peaks_cwt(convolution_values, np.arange(1, peak_range)))
    peaks = [0] + peaks + [len(convolution_values) - 1]
    return peaks, convolution_values

def determine_step_process(dwell, percentage, start, end, jump):
    attempt = np.array([])
    firstpeak = np.array([])
    lastpeak = np.array([])
    suminterval = np.array([])
    for window in np.arange(start, (end + jump*2), jump*2):
        # print('testing interval:', interval)
        # window must be an odd number
        if window/jump % 2 == 0:
            window += jump
        ma = convertma(dwell, window, jump)
        interval = window / jump
        std = movstd(ma, interval)
        oripeak = signal.find_peaks_cwt(std, np.arange(interval * percentage, interval, interval*0.05))
        # test the length explicitly; comparing an array to [] is ambiguous
        if len(oripeak) > 0:
            peaklength = np.append(oripeak, np.cumsum(dwell)[-1]/jump - 1) - np.append(0, oripeak)
            if np.amin(peaklength) > interval:
                attempt = np.append(attempt, 1)
            else:
                attempt = np.append(attempt, 0)
            firstpeak = np.append(firstpeak, oripeak[0])
            lastpeak = np.append(lastpeak, oripeak[-1])
            suminterval = np.append(suminterval, interval)
    np.savez(pathfilename + '_' + str(start) + '_' + str(end),
             attempt=attempt, firstpeak=firstpeak, lastpeak=lastpeak,
             suminterval=suminterval)

def get_peaks(self, data, domain):
    thresh = 1.1  # TODO: Make this adjustable - issue#2
    peakrange = np.arange(1, 100)
    rawpeaks = np.array(sc_sg.find_peaks_cwt(data, peakrange))
    # Remove leftmost peak (false positive at input)
    rawpeaks = rawpeaks[rawpeaks > rawpeaks.min()]
    threshpeaks = rawpeaks[data[rawpeaks] > thresh]
    return np.array([domain[threshpeaks], data[threshpeaks]]).T

def calculatepeak(step, jump, dwell):
    totalpeak = np.array([])
    totalinterval = np.array([])
    totalfilterpeak = np.array([])
    totalfilterinterval = np.array([])
    for window in step:
        if window/jump % 2 == 0:
            window += jump
        ma = convertma(dwell, window, jump)  # Calculate the moving average of the result
        interval = window / jump
        std = movstd(ma, interval)  # Calculate the moving standard deviation of the result
        oripeak = signal.find_peaks_cwt(std, np.arange(interval * percentage, interval, interval*0.05))
        oristdamp = std[oripeak]  # Obtain the standard deviation peak and the amplitude of the peak
        if len(oripeak) == 0:
            pass
        else:
            # Calculate the difference in Popen from the raw data using the time of the peak
            oripopendiffpeak = calpopendiffpeak(dwell, oripeak)
            # Calculate the difference in Popen based on the value of the standard deviation peak
            oripopendiffstdamp = oristdamp / np.sqrt(1.0/12)
            filterpeak, oripopendiffstdamp, oripopendiffpeak = filterpeaktime(
                dwell, std, oripeak, oripopendiffstdamp, oripopendiffpeak)
            totalpeak = np.append(totalpeak, oripeak)
            totalinterval = np.append(totalinterval, [interval] * len(oripeak))
            totalfilterpeak = np.append(totalfilterpeak, filterpeak)
            totalfilterinterval = np.append(totalfilterinterval, [interval] * len(filterpeak))
            for iprint in range(len(filterpeak)):
                print('peak', filterpeak[iprint],
                      'Popen(avg)', oripopendiffpeak[iprint],
                      'Popen(std)', oripopendiffstdamp[iprint])
    np.savez(pathfilename + str(step[0]) + '_' + str(step[-1]),
             totalpeak=totalpeak, totalinterval=totalinterval,
             totalfilterpeak=totalfilterpeak,
             totalfilterinterval=totalfilterinterval)

def curPeakdetect(y, x):
    if ds["type"] == "cwt":
        _maxidx = find_peaks_cwt(y, ds["widths"])
        # materialize the zip so callers get a list under Python 3 as well
        _max = list(zip(x[_maxidx], y[_maxidx]))
    else:
        _max, _ = peakdetect(y, x, lookahead=ds["lookahead"], delta=ds["delta"])
    return _max

def main(data):
    start = time.time()
    T = 1.0 / SAMPLING_RATE  # sampling interval
    Fs = 1.0 / T
    ir_data, bpm_data, avg_bpm_data = clean_data(data)
    hr_data = zero_mean(ir_data)

    cutoff_bpm = [50.0, 200.0]
    cutoff_hz = [x / 60 for x in cutoff_bpm]   # cutoff frequency in Hz
    cutoff = [x / (Fs / 2) for x in cutoff_hz]
    [b, a] = signal.butter(2, cutoff, 'bandpass')  # 2nd order Butterworth filter

    is_valid = check_data_good(ir_data)
    if not is_valid:
        return "Heart Rate: Please Place Sensor on Feet"
    else:
        # filter out the noise from signal
        hr_filt = signal.lfilter(b, a, hr_data)
        pks = signal.find_peaks_cwt(hr_filt, np.arange(3, 10))
        num_pks = len(pks)
        beats_from_peaks = num_pks / 2
        bpm_from_peaks = beats_from_peaks * 60 / TIME_SEC
        print("HR Found from Beats is = " + str(bpm_from_peaks) + " BPM")
        time_btw_peaks = sum(np.array(pks[1:num_pks]) - np.array(pks[0:-1])) / (num_pks - 1)
        bpm_from_peaks = SAMPLING_RATE * 60 / time_btw_peaks / 2
        print("HR Found from Time is = " + str(bpm_from_peaks) + " BPM")
        end = time.time()
        print("total time = " + str(end - start))
        return bpm_from_peaks

def sort_spikes(sweep):
    filtered = np.array(sweep) - medfilt(sweep, 51)
    pks = find_peaks_cwt(filtered, np.arange(9, 20))
    offsets = np.zeros_like(pks)
    datamtrx = np.zeros(shape=(80, len(pks[3:])))
    for i, pk in enumerate(pks[3:]):
        offset = np.argmax(filtered[pk - 40:pk + 40]) - 40
        datamtrx[:, i] = filtered[pk - 40 + offset:pk + 40 + offset]
        offsets[i + 3] = offset
        # plb.plot(filtered[pk-40+offset:pk+20+offset], color='k', alpha=0.1)

    from scipy.linalg import svd
    U, s, Vt = svd(datamtrx, full_matrices=False)
    V = Vt.T
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    V = V[:, ind]

    features = V
    es, idx = kmeans2(features[:, 0:4], 4, iter=50)
    colors = [([1, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 1])[i] for i in idx]

def autocorrelation(x, y):
    # normalise so range is 2 - no idea if this is the right thing to do...
    y = 2 * y / (max(y) - min(y))
    y = y - np.median(y)

    # calculate autocorrelation fn (integer maxlags for Python 3)
    lags, acf, lines, axis = pb.acorr(y, maxlags=len(y) // 2)

    # halve acf and find peaks
    acf = acf[len(acf) // 2:]
    pks = find_peaks_cwt(acf, np.arange(10, 20))  # play with these params,
    # they define peak widths
    peaks = pks[1:]  # lose the first peak
    period = peaks[0] * cadence
    print('acf period = ', period)

    pl.clf()
    pl.subplot(2, 1, 1)
    pl.plot(x[:4000], y[:4000], 'k.')
    pl.title('Period = %s' % period)
    pl.subplot(2, 1, 2)
    pl.plot(np.arange(5000) * cadence, acf[:5000])
    # pl.plot(np.arange(len(acf))*cadence, acf)
    [pl.axvline(peak * cadence, linestyle='--', color='r') for peak in peaks]
    pl.xlim(0, 5000 * cadence)
    pl.savefig('/Users/angusr/Python/george/acf/%sacf' % int(KID))
    # np.savetxt('/Users/angusr/Python/george/acf/%sacf_per.txt' % int(KID), period)
    return period

def call_boundary_peaks(Pb):
    n = len(Pb)
    peaks = find_peaks_cwt(
        Pb,
        np.array([0.5]),
        wavelet=None,
        max_distances=None,
        gap_thresh=None,
        min_length=None,
        min_snr=1,
        noise_perc=10)

    regions = []
    for i in peaks:
        top = Pb[i]
        x = [l for l in range(1, min(i, 5)) if Pb[i - l] > 0.4 * top]
        y = [r for r in range(1, min(n - i, 5)) if Pb[i + r] > 0.4 * top]
        start = (i - x[-1]) if len(x) else i
        end = (i + y[-1]) if len(y) else i
        regions.append([start - 1, end + 1, top])

    if len(regions):
        starts, ends, values = zip(*regions)
    else:
        starts, ends, values = [], [], []
    return np.array(starts), np.array(ends), np.array(values)

def wavelet_peaks(vector, *args, **kwargs):
    """
    This is the function that will be mapped by multiprocess. This is a
    wrapper around the scipy function. It uses a parameter - wavelet_widths
    that is configured outside this function.

    Parameters
    ----------
    vector : 1D numpy array
        Feature vector containing peaks

    Returns
    -------
    peak_indices : list
        List of indices of peaks within the prescribed peak widths
    """
    try:
        peak_width_bounds = kwargs.pop('peak_widths')
        # pop with a default so a missing 'peak_step' does not raise KeyError
        peak_width_step = kwargs.pop('peak_step', 20)
        # The below numpy array is used to configure the returned function wpeaks
        wavelet_widths = np.linspace(peak_width_bounds[0], peak_width_bounds[1],
                                     peak_width_step)
        peak_indices = find_peaks_cwt(np.abs(vector), wavelet_widths, **kwargs)
        return peak_indices
    except KeyError:
        warn('Error: Please specify "peak_widths" kwarg to use this method')

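# A minimal sketch of calling wavelet_peaks directly; the signal and the
# 'peak_widths' bounds below are invented for illustration.
import numpy as np
from warnings import warn
from scipy.signal import find_peaks_cwt

sig = np.zeros(500)
sig[100] = 1.0
sig[300] = 0.8
sig = np.convolve(sig, np.hanning(25), mode='same')  # turn spikes into smooth peaks
# scan widths between 10 and 30 samples in 20 steps
idx = wavelet_peaks(sig, peak_widths=(10, 30), peak_step=20)
print(idx)  # expect indices near 100 and 300
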
def find_peak_data(data, n, spec=False):
    indices = find_peaks_cwt(data, arange(30, 80))
    values = [data[i] for i in indices]
    params = [val for i in range(n) for val in [values[i], indices[i], 30]]
    if spec:
        params.extend((0.2, 1))
    return params

def dih_plotter3(dirname, savename, numplot):
    inlist = dih_tablereader(dirname)
    plotlist = inlist[0:numplot]
    colors = iter(cm.rainbow(np.linspace(0, 1, len(plotlist))))  # creates color table
    for memberlist in plotlist:
        x = memberlist[0]  # x coordinate data
        y = memberlist[1]  # y coordinate data
        ysmooth = dih_boxcar(y)
        xshort = x[0:len(ysmooth)]
        peaklist = signal.find_peaks_cwt(ysmooth, np.arange(1, 30))  # continuous wavelet transformation
        plt.plot(xshort, ysmooth, color=next(colors))
        for num in peaklist:
            plt.plot(xshort[num], ysmooth[num], 'gD')  # places markers on peaks
        peak = max(ysmooth)
        peaklist2 = [i for i, j in enumerate(ysmooth) if j == peak]
        for num in peaklist2:
            plt.plot(xshort[num], ysmooth[num], 'rD')

    # finish up plot characteristics
    plt.title('Super Most Awesome Graph!')
    plt.ylabel('Flux')
    plt.xlabel('Time')
    pylab.ylim([-5, 5])
    pylab.xlim([0, 6.3])
    plt.savefig(savename)  # saves postscript file
    return plotlist

def detect_lines(w_arr, f_arr, sigma=3, bsigma=None, niter=5, mask=None,
                 kern=default_kernal, center=False):
    """Detect lines goes through a 1-D spectrum and detects peaks

    w_arr -- x-axis array (pixels, wavelength, etc.)
    f_arr -- y-axis array (flux, counts, etc.)
    sigma -- threshold for detecting sources
    bsigma -- threshold for determining background statistics
    niter -- iterations to determine background
    center -- return centroids and not pixels
    mask -- pixels not to use
    """
    # set up the variables
    if bsigma is None:
        bsigma = sigma

    if mask:
        f_arr = f_arr[mask]
        w_arr = w_arr[mask]

    # find all peaks
    xp = signal.find_peaks_cwt(f_arr, np.array([sigma]))
    xp = np.array(xp)

    # set the output values
    if center:
        xdiff = int(0.5 * len(kern) + 1)
        xp = xp * 1.0
        for i in range(len(xp)):
            # xp holds floats here, so cast back to int before indexing w_arr
            xp[i] = mcentroid(w_arr, f_arr, kern=kern, xdiff=xdiff,
                              xc=w_arr[int(xp[i])])
    return xp

def FindPeaksInSpectrum(S):
    """Find peaks using scipy"""
    peakind = signal.find_peaks_cwt(S, numpy.arange(1, 10), min_length=16)
    return peakind

def predict(fec, widths=None, min_snr=10, **kwargs):
    split_fec = Analysis.zero_and_split_force_extension_curve(fec)
    force = split_fec.retract.Force
    if widths is None:
        expected_max_log_width = int(np.ceil(np.log2(0.05 * force.size)))
        widths = np.logspace(0, expected_max_log_width, base=2, num=3)
    peak_indices = find_peaks_cwt(force, widths=widths, min_snr=min_snr, **kwargs)
    return peak_indices

def findpeaks(resdata):
    respeaks = []
    for i in resdata:
        peaks = []
        for j in range(i.shape[0]):
            peaks.append(np.mean(i[j, signal.find_peaks_cwt(list(i[j, :]), np.arange(1, 50))]))
        respeaks.append(peaks)
    return respeaks

def mean_hr_bpm(filename):
    """module to take user input for time scale, and analyze ECG input in
    that time scale

    :param filename: the name of a file located in the /test_data folder
        entered as a string
    :returns heartrate: heartrate during a specific period as a float
    :raises IOError: raised if user tries to input value not accepted by
        program
    :raises ValueError: raised if the generally accepted values fall outside
        of the signal time range
    """
    # time_input = input("Please input time (10 sec or 20 sec): ")
    time_input = "10 sec"
    time_vector = extract_time_data(filename)
    if np.max(time_vector) >= float(time_input[:-4]):
        if str(time_input) == "10" + " sec":
            ind = np.where(time_vector == 10)[0]
            df = import_data(filename)
            values = df.values
            trimmed = values[np.arange(0, ind), 1]
            trim_norm = trimmed - np.mean(trimmed)
            template = pd.read_csv("test_data/template.csv", header=None)
            norm_template = extract_template_data(template)
            corr = np.correlate(norm_template, trim_norm, mode="full")
            peaks = signal.find_peaks_cwt(corr, np.arange(1, 300))
            heartrate = len(peaks) / (10 / 60)
        elif str(time_input) == "20" + " sec":
            ind = np.where(time_vector == 20)[0]
            df = import_data(filename)
            values = df.values
            trimmed = values[np.arange(0, ind), 1]
            trim_norm = trimmed - np.mean(trimmed)
            template = pd.read_csv("test_data/template.csv", header=None)
            norm_template = extract_template_data(template)
            corr = np.correlate(norm_template, trim_norm, mode="full")
            peaks = signal.find_peaks_cwt(corr, np.arange(1, 300))
            heartrate = len(peaks) / (20 / 60)
        else:
            raise IOError("Invalid input. Try Again (Make sure to include "
                          "sec)")
    else:
        raise ValueError("Attempted input outside signal range")
    return heartrate

def find_peaks(img, thresh):
    # integer division so the slice index is an int under Python 3
    img_half = img[img.shape[0] // 2:, :, 0]
    data = np.sum(img_half, axis=0)
    filtered = scipy.ndimage.filters.gaussian_filter1d(data, 20)
    xs = np.arange(len(filtered))
    peak_ind = signal.find_peaks_cwt(filtered, np.arange(20, 300))
    peaks = np.array(peak_ind)
    peaks = peaks[filtered[peak_ind] > thresh]
    return peaks, filtered

def max_data(x_data, y_data, width, no_peaks):
    peak_ind = signal.find_peaks_cwt(y_data, np.array([width] * no_peaks))
    # print(peak_ind)
    # plt.show()
    # list comprehensions instead of map() so Python 3 returns lists, not iterators
    x_peaks = [x_data[i] for i in peak_ind]
    # print(x_peaks)
    y_peaks = [y_data[i] for i in peak_ind]
    # print(y_peaks)
    return x_peaks, y_peaks

def findAbsorptionPeaks_TESTING(self):
    # this method might lead to better results than the findAbsorptionLines method?
    # movav = self.getmovingAveragedData()
    hlog = -20 * py.log10(self.getFAbs())
    etalon = self.getEtalonSpacing()
    Ns = int(etalon / self.getfbins())
    Ns = py.arange(max(1, Ns - 10), Ns + 10, 1)
    peaks = signal.find_peaks_cwt(hlog, Ns)
    return peaks

def __init__(self, data):
    self.data = data
    self.data_inv = [-d for d in data]
    # First we find the peaks in the inverted data which could be from the Receiver
    self.trcal = -1
    self.peaks = find_peaks_cwt(self.data_inv, np.arange(1, 0.9 * tari))
    self.peaks = [p for p in self.peaks
                  if (self.data[int(p - 0.5 * tari)] - self.data[p]) > min_rt_th]
    self.cur_peak = 0
    self.peak_cnt = len(self.peaks)

def PeakFinder(array):
    x = array[:, 0]
    y = array[:, 1]
    from scipy import signal
    import numpy as np
    # Workings of this algorithm are described in the following link:
    # http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
    maxima = signal.find_peaks_cwt(y, np.arange(1, 10))
    # This generates an array of differences between the indices of the
    # local maxima which will be used below to find periodicity.
    difference = np.diff(maxima)
    return difference

def find_highest_two_peaks(smoothed_shots):
    peak_positions_in_pixels = np.zeros((smoothed_shots.shape[0], 2))
    for shot_idx in range(smoothed_shots.shape[0]):
        # asarray so the fancy indexing below works even if a list is returned
        peak_pos = np.asarray(find_peaks_cwt(smoothed_shots[shot_idx], range(1, 11)))
        peak_values = smoothed_shots[shot_idx, peak_pos]
        max_2 = np.argsort(peak_values)[-2:]
        peak_positions_in_pixels[shot_idx] = np.where(mask)[0][peak_pos[max_2]]
    return peak_positions_in_pixels.astype(int)

def updateWave():
    global data, fData, time, ptList, freqList, line
    freqList = []
    if len(ptList) < 2:
        fData = np.zeros(CHUNK, dtype=float)
        data = np.uint8(fData)
        return
    elif len(ptList) == 2:
        dist = np.sqrt((ptList[0]['xPos'] - ptList[1]['xPos'])**2. +
                       (ptList[0]['yPos'] - ptList[1]['yPos'])**2. +
                       (ptList[0]['depth'] - ptList[1]['depth'])**2.)
        freqList.append(int(BASEFREQ / dist))
    else:
        for point1 in ptList:
            for point2 in ptList:
                if point1 is not point2:
                    dist = np.sqrt((point1['xPos'] - point2['xPos'])**2. +
                                   (point1['yPos'] - point2['yPos'])**2. +
                                   (point1['depth'] - point2['depth'])**2.)
                    freqList.append(int(BASEFREQ / dist))

    fData = np.zeros(CHUNK, dtype=float)
    for freq in freqList:
        iFreq = float(int(freq / 10.))
        ampl = 1. / freq
        fData += ampl * np.sin(time * iFreq)
    fData = fData / np.max(np.abs(fData)) * 127 + 128

    yData = np.abs(np.fft.fft(fData[:PLOTWIDTH]))
    yData /= 100.
    # yData /= yData.max()
    # yData = np.log(yData)
    yDataSwap = np.fft.fftshift(yData)
    line.set_ydata(yDataSwap)

    peakIndices = signal.find_peaks_cwt(yDataSwap,
                                        np.asarray([0.1, 0.11, 0.12]),
                                        min_snr=1.)
    nPeaks = len(peakIndices)
    peaksTxt.set_text('Peaks %d\nFreqs %d' % (nPeaks, int((nPeaks - 1) / 2)))

    lineIx = 0
    for peak in peakArray:
        peak[0].set_visible(False)
    for peakIx in peakIndices:
        freqAt = float(plotFreq[peakIx])
        # print('%5.2f\t' % freqAt, end='')
        peakArray[lineIx][0].set_xdata((freqAt, freqAt))
        peakArray[lineIx][0].set_ydata([0., 1000])
        peakArray[lineIx][0].set_visible(True)
        lineIx += 1
    # plt.pause(.001)
    fig.canvas.draw()
    data = np.uint8(fData)

def find_peaks(signal, min_height=25, min_peak_ratio=0.5, max_peak_ratio=7,
               ladder=False):
    """ find peaks in the signal spectrum
        return [ (indice, height), ... ]

        we differentiate settings between ladder (since we know the absolute
        size of the peaks) and the sample
    """
    if ladder:
        widths = np.arange(5, 15)
    else:
        widths = np.arange(5, 15)

    # peak_index = find_peaks_cwt(signal, widths, min_snr=1, min_length=1,
    #                             max_distances=widths/23, noise_perc=25,
    #                             gap_thresh=50)
    peak_index = find_peaks_cwt(signal, widths)
    # find_peaks_cwt returns an array; test its length rather than its truth value
    if len(peak_index) == 0:
        return []

    peaks = [(x, signal[x]) for x in peak_index]
    print("initial peaks: %d" % len(peaks))
    pt.plot(signal)
    plot_peaks(peaks, 'ro')
    pt.show()
    pt.close()

    # filter for min_height
    peaks = [peak for peak in peaks if peak[1] > min_height]
    pt.plot(signal)
    plot_peaks(peaks, 'ro')
    pt.show()
    pt.close()
    if not peaks:
        return []

    heights = [peak[1] for peak in peaks]

    # filter for maximum peak ratio with 75th percentile
    upper_percentile = np.percentile(heights, 75)
    max_height = upper_percentile * max_peak_ratio
    print("heights:")
    print(heights)
    print("upper_percentile:", upper_percentile)
    print("max_height:", max_height)
    peaks = [peak for peak in peaks if peak[1] < max_height]
    pt.plot(signal)
    plot_peaks(peaks, 'ro')
    pt.show()
    pt.close()

    return peaks

def firstReflectionIndex(self):
    '''
    :return: a guess at the location of the first reflection using scipy
        find_peaks_cwt
    '''
    peak = self.peakIndex()
    endIndex = self.size()
    lengthMs = (endIndex - peak) / self._fs * 1000
    if lengthMs > 50.0:
        search_end = peak + round(self._fs / 20)
        logger.debug(
            f"{self} has {round(lengthMs)}ms from peak to end, searching 50ms "
            f"({peak}:{search_end}) for 1st reflection")
        to_search = self.samples[peak:search_end]
    else:
        to_search = self.samples[peak:]
    return next((i + peak
                 for i in signal.find_peaks_cwt(to_search, np.arange(10, 20))
                 if i > 40), self.size() - 1)

def find_local_minima(self, f_array, window=101):
    """
    Find the local minima of an absorption profile.

    Args:
        f_array: flux array
        window: smoothing window, pixels

    Returns:
        indices of local minima in flux_array
    """
    # smooth flux profile
    smoothed_flux = savgol_filter(f_array, window, 1)
    # invert the flux so that minima become peaks
    return find_peaks_cwt(smoothed_flux * -1, np.array([window / 3]))

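# The same minima-finding idea outside the class, on a synthetic absorption
# profile: invert the smoothed flux so dips become peaks for find_peaks_cwt.
import numpy as np
from scipy.signal import savgol_filter, find_peaks_cwt

flux = np.ones(1000)
flux -= 0.6 * np.exp(-(np.arange(1000.) - 400)**2 / (2 * 15**2))  # one absorption dip
window = 101
smoothed = savgol_filter(flux, window, 1)
minima = find_peaks_cwt(smoothed * -1, np.array([window / 3]))
print(minima)  # expect an index near 400
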
def getpeaks(hist, max_width, min_width):
    histcounts = [x[1] for x in hist]
    if not max_width:
        mymax = 1000
    else:
        mymax = max_width
    if not min_width:
        mymin = 1
    else:
        mymin = min_width
    if len(histcounts) > 0 and mymin and mymax:
        out = find_peaks_cwt(histcounts, [x for x in range(mymin, mymax + 1)])
    else:
        out = array([], dtype=float64)
    return out

def estimate_pars(pars, window=999):
    """
    estimate parameters for curve fitting
    """
    genome, sample, xy, length = pars
    if xy is False:
        return False
    x, y = xy
    y_med = [x, median_filter(y)]

    # find indexes of peaks and troughs
    pks = signal.find_peaks_cwt(y, np.arange(100, 1000, 10000))
    trs = signal.find_peaks_cwt([-i for i in y], np.arange(100, 1000, 10000))

    # find positions on genome for peaks and troughs
    pks = [[y_med[0][i], y_med[1][i]] for i in pks]
    trs = [[y_med[0][i], y_med[1][i]] for i in trs]

    # find best pk/tr pair based on greatest distance in coverage
    # and position on genome
    ori, ter = check_peaks([pks, trs], length)
    x1, x2 = ori[0], ter[0]
    y1, y2 = ori[1], ter[1]
    if genome is not None:
        return genome, sample, (x1, x2, y1, y2, y_med)
    else:
        return x1, x2, y1, y2, y_med

def fit_algorithm(self):
    # TODO - this is definitely not the correct long-term strategy
    self.X = self.X[:, 0]
    num_samples = len(self.X)
    width_array = np.asarray(np.arange(1, num_samples / self.parms['peak_width']))

    if self.algorithm == "cwt":
        indexes = find_peaks_cwt(self.X, width_array,
                                 gap_thresh=self.parms['gap_threshold'],
                                 min_snr=self.parms['min_snr'],
                                 noise_perc=self.parms['noise_perc'])
    elif self.algorithm == "matlab_findpeaks":
        mph = self.parms['mph']
        if mph:
            mph = float(mph)
        indexes = detect_peaks(self.X, mph=mph, mpd=self.parms['mpd'],
                               threshold=self.parms['threshold'],
                               edge=self.parms['edge'],
                               kpsh=self.parms['kpsh'],
                               valley=self.parms['valley'])

    self.result["indexes"] = indexes.tolist()

def find_peaks_cwt(self, vector, *args, **kwargs):
    """
    Find peaks function based on the scipy.signal package

    Parameters
    ----------
    vector : CPD scores array
    args : see docs for
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks_cwt.html
    kwargs

    Returns
    -------
    Array with location of peaks
    """
    peaks = find_peaks_cwt(vector, *args, **kwargs)
    return peaks

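# Hedged usage sketch for the wrapper above: in practice the widths array is
# the only extra argument usually passed through; the score vector here is
# synthetic.
import numpy as np
from scipy.signal import find_peaks_cwt

scores = np.abs(np.sin(np.linspace(0, 6 * np.pi, 300)))
peaks = find_peaks_cwt(scores, np.arange(5, 40))
print(peaks)  # indices of the crests of |sin|
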
def get_CorrStuff(st):
    ntr = len(st)
    sps = st[0].stats.sampling_rate
    strong_filter = np.ones(int(sps)) / float(sps)
    # This value is slightly greater than the matlab one, to account for
    # differences in floating precision
    min_peak_height = 0.4

    CorPeakNumber = np.empty(ntr, dtype=int)
    INT1 = np.empty(ntr, dtype=float)
    INT2 = np.empty(ntr, dtype=float)
    INT_RATIO = np.empty(ntr, dtype=float)
    for i in range(ntr):
        cor = np.correlate(st[i].data, st[i].data, mode='full')
        cor = cor / np.max(cor)

        # find number of peaks
        cor_env = np.abs(hilbert(cor))
        cor_smooth = l2filter(strong_filter, 1, cor_env)
        cor_smooth2 = l2filter(strong_filter, 1, cor_smooth / np.max(cor_smooth))
        ipeaks = find_peaks_cwt(cor_smooth2, np.arange(1, len(cor_smooth2) / 2))
        n_peaks = 0
        for ip in ipeaks:
            if cor_smooth2[ip] > min_peak_height:
                n_peaks += 1
        CorPeakNumber[i] = n_peaks

        # integrate over bands
        npts = len(cor_smooth)
        ilag_0 = np.argmax(cor_smooth) + 1
        ilag_third = ilag_0 + npts // 6  # integer division so slices get int indices

        # note that these integrals are really false (dt is not correct)
        max_cor = np.max(cor_smooth)
        int1 = np.trapz(cor_smooth[ilag_0:ilag_third + 1] / max_cor)
        int2 = np.trapz(cor_smooth[ilag_third:] / max_cor)
        int_ratio = int1 / int2

        INT1[i] = int1
        INT2[i] = int2
        INT_RATIO[i] = int_ratio

    return CorPeakNumber, INT1, INT2, INT_RATIO

def init():
    sdr = RtlSdr()
    # configure device
    sdr.sample_rate = SAMPLE_RATE
    sdr.center_freq = CENTER_FREQ
    sdr.gain = GAIN

    # configure ROS stuff
    rospy.init_node('sdrtag')
    detections_pub = rospy.Publisher('sdr/detections', SDRTagDetections,
                                     queue_size=10)
    rate = rospy.Rate(PUB_RATE)

    while not rospy.is_shutdown():
        samples = sdr.read_samples(256 * 1024)
        [psd, freq] = plt.psd(samples, NFFT=1024, Fs=sdr.sample_rate / 1e6,
                              Fc=sdr.center_freq / 1e6)
        # widths must be a 1-D array of candidate peak widths, not nested
        peaks = signal.find_peaks_cwt(psd, np.array([WIDTH]))

        # Publish detections
        detections = SDRTagDetections()
        for peak in peaks:
            detection = SDRTagDetection()
            detection.freq = freq[peak]
            detection.psd = psd[peak]
            detections.detections.append(detection)
        detections_pub.publish(detections)

        # Optional display (plt.hold was removed from matplotlib; the current
        # default already holds between plot calls)
        if PLOT:
            plt.xlabel('Frequency (MHz)')
            plt.ylabel('Relative power (dB)')
            plt.plot(freq[peaks], 10 * np.log10(abs(psd[peaks])), 'rs',
                     markersize=20, markerfacecolor='r')
            plt.pause(0.0001)
        rate.sleep()

def histogram_lane_detection(img, steps, search_window, h_window):
    """
    Try to detect lane line pixels by applying a sliding histogram

    :param img: binary mask
    :param steps: number of steps for the sliding histogram
    :param search_window: window which limits the horizontal search space
    :param h_window: window size for horizontal histogram smoothing
    :return: x, y of detected pixels
    """
    all_x = []
    all_y = []
    masked_img = img[:, search_window[0]:search_window[1]]
    # number of y pixels each step of the sliding window will take
    pixels_per_step = img.shape[0] // steps

    for i in range(steps):
        start = masked_img.shape[0] - (i * pixels_per_step)
        end = start - pixels_per_step
        histogram = np.sum(masked_img[end:start, :], axis=0)
        histogram_smooth = signal.medfilt(histogram, h_window)  # smooth the histogram
        peaks = np.array(signal.find_peaks_cwt(histogram_smooth, np.arange(1, 5)))

        highest_peak = highest_n_peaks(histogram_smooth, peaks, n=1, threshold=5)
        if len(highest_peak) == 1:
            highest_peak = highest_peak[0]
            center = (start + end) // 2
            x, y = get_pixel_in_window(masked_img, highest_peak, center,
                                       pixels_per_step)
            all_x.extend(x)
            all_y.extend(y)

    all_x = np.array(all_x) + search_window[0]
    all_y = np.array(all_y)
    return all_x, all_y

def findPeak():
    region = np.where((x_data >= self.lower) & (x_data <= self.upper))
    sub_data = y_data[region]
    sub_region = x_data[region]
    algorithm = self.algorithm.currentText()
    shape = self.shape.currentText()
    if shape == "Peak":
        const = 1
    else:
        const = -1
    sub_data = sub_data * const
    if algorithm == "Extremum":
        peak = np.max(sub_data)
        idx = np.where(sub_data == peak)
        x = sub_region[idx][0]
        y = sub_data[idx][0] * const
        self.peakCenter.setValue(x)
        return self.renderPeakPoint([x, y])
    elif algorithm == "Matlab Like":
        indexes = find_peaks(
            sub_data,
            height=self.amplitude.value(),     # ignore peaks below this height
            threshold=self.threshold.value(),  # height difference between neighboring points
            distance=self.detectDis.value(),   # minimum distance between peaks
            width=self.peakWidth.value()       # peak width
        )[0]
        if np.size(indexes) == 0:
            return
        idx = np.where(sub_data == np.max(sub_data[indexes]))
        x = sub_region[idx][0]
        y = sub_data[idx][0] * const
        self.peakCenter.setValue(x)
        return self.renderPeakPoint([x, y])
    elif algorithm == "Wavelet Transform":
        # find_peaks_cwt expects sequences for widths/max_distances and
        # returns the peak indices directly (no trailing [0], unlike find_peaks)
        indexes = find_peaks_cwt(
            sub_data,
            widths=[self.peakWidth.value()],         # peak width
            max_distances=[self.detectDis.value()],  # distance between peaks
            noise_perc=self.noisePrt.value())        # noise percentile
        if np.size(indexes) == 0:
            return
        idx = np.where(sub_data == np.max(sub_data[indexes]))
        x = sub_region[idx][0]
        y = sub_data[idx][0] * const
        self.peakCenter.setValue(x)
        return self.renderPeakPoint([x, y])

def threshold(pseudocounts):
    """The input for the threshold function is an array of pseudocounts.
    (This can be changed.)

    The output is a threshold (cutoff value); words with pseudocounts less
    than this threshold are deemed as low relevance for the topic (concept).
    """
    density = scipy.stats.gaussian_kde(pseudocounts)
    xs = np.linspace(0, 1, 2000)
    ys = density(xs)
    np.seterr(divide='ignore')
    # minima of the density show up as peaks of -ys
    peaks = signal.find_peaks_cwt(-ys, np.array([0.001, 0.01, 0.1]))
    if peaks.any():
        return min(xs[peaks])
    else:
        return 0

def histogram_base_points(lanes, min_peak=25.0):
    """Uses a histogram to find possible base points for lane lines"""
    hist = np.sum(lanes[int(lanes.shape[0] * 0.5):, :], axis=0)

    widths = [100]
    idx = find_peaks_cwt(hist, widths, max_distances=widths, noise_perc=50)
    if len(idx) < 2:
        return None

    # Avoid edges
    idx = [i for i in idx
           if i > lanes.shape[1] * 0.1
           and i < lanes.shape[1] * 0.9
           and max(hist[i - 50:i + 50]) > min_peak]

    return [min(idx), max(idx)]

def callback_auto_detect_peaks_button():
    global peaks
    global user_AUTO_PEAK_DETECT_MIN_SNR
    peaks = signal.find_peaks_cwt(
        waveform_data_source.data['intensities'],
        np.arange(user_AUTO_PEAK_DETECT_WIDTHS_MIN,
                  user_AUTO_PEAK_DETECT_WIDTHS_MAX),
        min_snr=user_AUTO_PEAK_DETECT_MIN_SNR,
    )

    new_peaks_data = dict()
    new_peaks_data['peaks'] = peaks
    new_peaks_data['wavenumber_peaks'] = calibrate_model(peaks)
    new_peaks_data['intensities'] = np.array(
        [waveform_data_source.data['intensities'][i] for i in peaks]
    ) + raman_configs.PEAK_MARKS_OVERHANG

    peaks_data_source.data = new_peaks_data

def find_peaks(x, y):
    # Default peak detection parameters
    wavelet = signal.ricker  # wavelet of choice
    widths = np.arange(1, 20)  # range of widths of the ricker wavelet to search/evaluate
    max_distances = widths / 8.  # ridgeline connectivity threshold; smaller values give more peaks; larger values consider overlapping peaks as one
    gap_thresh = 4  # threshold number of rows for ridgeline connectivity; smaller values give more peaks
    min_length = 3  # minimum ridgeline length; smaller values give more peaks
    min_snr = 2  # minimum SNR
    noise_perc = 10  # percentile of points below which to consider noise
    h = 3  # number of points skipped in finite differences
    truncationlow = 10  # low q truncation for zeros
    truncationhigh = 50

    peaks = signal.find_peaks_cwt(y, widths, wavelet, max_distances,
                                  gap_thresh, min_length, min_snr, noise_perc)
    peaks = peaks[1:]
    return list(np.array(np.vstack([x[peaks], y[peaks], peaks])))

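# Quick demonstration of how the SNR parameter above trades sensitivity for
# robustness; the noisy two-peak signal is invented for this sketch.
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
x = np.arange(500)
y = (np.exp(-(x - 150.)**2 / 200.) + np.exp(-(x - 350.)**2 / 200.)
     + 0.05 * rng.standard_normal(500))
widths = np.arange(1, 20)
strict = signal.find_peaks_cwt(y, widths, min_snr=2)    # usually fewer, cleaner peaks
loose = signal.find_peaks_cwt(y, widths, min_snr=0.5)   # usually more candidates
print(len(strict), len(loose))
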
def plotgraph(x, y, y2, pp, title, xlabel, ylabel, rp):
    if all(v == 0 for v in y):
        print('There is nothing in "%s"' % title)
    else:
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.plot(x[rp[0]:rp[1]], y[rp[0]:rp[1]])
        if y2 != 0:
            a = np.empty(np.size(x[rp[0]:rp[1]]))
            a.fill(y2)
            plt.plot(x[rp[0]:rp[1]], a)
        if pp == 1:
            peakFind = signal.find_peaks_cwt(y, np.arange(0.01, 1))
            plt.plot(x[peakFind], y[peakFind], 'ro')
        plt.show()

def estimate_max(table):
    table = table[1:]
    sim_freqs = [(x[0], (x[2] - x[3])) for x in table]
    sim_freqs = sorted(sim_freqs, key=lambda x: x[1])
    # find the peaks
    xs = [x[1] for x in sim_freqs]
    peaks = list(find_peaks_cwt(xs, np.arange(50, 200)))
    # this produces a list; find the biggest one in the list
    big = (0, 0)
    for peak in peaks:
        if sim_freqs[peak][1] > big[1]:
            big = (sim_freqs[peak][0], sim_freqs[peak][1])
    return big[0]

def call_tads(ob: np.ndarray):
    """
    Currently this method produces too many TADs.
    """
    from scipy import ndimage, signal
    ob[~np.isfinite(ob)] = 0
    insu = insu_score(ob)
    gau_insu = -ndimage.gaussian_filter1d(insu, 3)
    peaks = signal.find_peaks_cwt(gau_insu, np.arange(2, 5))
    borders = [0] + list(peaks) + [len(insu)]
    info = {'ob': ob}
    score = TadScore(info=info)
    dp = dp_solve(borders, score)
    return dp[0][-1].extract_tads()

def lane_peaks(histogram):
    peaks = signal.find_peaks_cwt(histogram, np.arange(1, 150), min_length=150)
    midpoint = int(histogram.shape[0] / 2)  # np.int was removed in newer numpy

    # if we found at least two peaks use the signal approach (better in shadows)
    if len(peaks) > 1:
        # in case more than 2 are found, just take the left and right one
        peak_left, *_, peak_right = peaks
    # otherwise just choose the highest points in left and right of center segments
    else:
        peak_left = np.argmax(histogram[:midpoint])
        peak_right = np.argmax(histogram[midpoint:]) + midpoint

    return peak_left, peak_right

def update_graph(self):
    """Updates the graph with new letters frequencies"""
    # get the letters frequencies
    # l, v = self.parse_file(self.lineEdit.text())
    time, voltage = self.get_the_wave(self.lineEdit.text())

    # clear the Axes
    self.mpl.canvas.ax.clear()
    self.mpl.canvas.bx.clear()

    # draw a bar chart for letters and their frequencies
    # set width to 0.5 and shift bars of 0.25, to be centered
    # self.mpl.canvas.ax.bar(np.arange(len(l))-0.25, v, width=0.5)
    # reset the X limits
    # self.mpl.canvas.ax.set_xlim(xmin=-0.25, xmax=len(l)-0.75)
    # set the X ticks & tickslabel as the letters
    # self.mpl.canvas.ax.set_xticks(range(len(time)))
    # self.mpl.canvas.ax.set_xticklabels(time)

    # enable the grid
    self.mpl.canvas.ax.get_xaxis().grid(True)
    self.mpl.canvas.ax.get_yaxis().grid(True)
    self.mpl.canvas.ax.plot(voltage, 'k')

    # filter the spectrum: zero every FFT bin below the threshold slider value
    updated_voltage = np.fft.rfft(voltage)
    for i in range(len(updated_voltage)):
        if abs(updated_voltage[i]) < self.thresholdSlider.value():
            updated_voltage[i] = 0
    updated_voltage = np.fft.irfft(updated_voltage)

    # invert the signal so that minima appear as peaks to find_peaks_cwt
    m = []
    for i in range(len(updated_voltage)):
        m.append(-updated_voltage[i])

    self.mpl.canvas.bx.plot(updated_voltage, 'b')
    peakkind = signal.find_peaks_cwt(m, np.arange(1, 2500))
    for i in peakkind:
        self.mpl.canvas.bx.annotate('local', xy=(i, m[i]),
                                    arrowprops=dict(facecolor='black', shrink=0.1))

    # reset the X limits
    # self.mpl.canvas.bx.set_xlim(xmin=-0.25, xmax=len(l)-0.75)
    # set the X ticks & tickslabel as the letters
    # self.mpl.canvas.bx.set_xticks(range(len(l)))
    # self.mpl.canvas.bx.set_xticklabels(l)

    # enable the grid
    self.mpl.canvas.bx.get_yaxis().grid(True)
    self.mpl.canvas.bx.get_xaxis().grid(True)

    # force an image redraw
    self.mpl.canvas.draw()

def fit(xdata, ydata, distribution):
    """Identify and fit an arbitrary number of peaks in a 1-d spectrum array.

    Parameters
    ----------
    xdata : 1-d array
        X data.
    ydata : 1-d array
        Y data.

    Returns
    -------
    results : lmfit.MinimizerResults
        Results of the fit. To get parameters, use `results.params`.
    """
    # Identify peaks
    index = find_peaks_cwt(ydata, widths=np.arange(1, 100))

    # Number of peaks
    n_peaks = len(index)

    # Construct initial guesses
    parameters = lmfit.Parameters()
    for peak_i in range(n_peaks):
        idx = index[peak_i]
        # Add center parameter
        parameters.add(name='peak_{}_center'.format(peak_i), value=xdata[idx])
        # Add height parameter
        parameters.add(name='peak_{}_height'.format(peak_i), value=ydata[idx])
        # Add width parameter
        parameters.add(name='peak_{}_width'.format(peak_i), value=.1)

    # Minimize the above residual function.
    results = lmfit.minimize(residual, parameters, args=[distribution, xdata],
                             kws={'ydata': ydata})

    return results, parameters

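# The peak-seeding step above in isolation: find_peaks_cwt supplies the
# initial center/height guesses that lmfit then refines. The data are
# synthetic; 'residual' and 'distribution' are assumed to exist elsewhere.
import numpy as np
from scipy.signal import find_peaks_cwt

xdata = np.linspace(-5, 5, 500)
ydata = np.exp(-(xdata - 1)**2) + 0.5 * np.exp(-(xdata + 2)**2)
index = find_peaks_cwt(ydata, widths=np.arange(1, 100))
print([(xdata[i], ydata[i]) for i in index])  # initial (center, height) guesses
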
def getlanelinesbase(imgx):
    '''
    Returns lane (left, right) lines
    imgx: input image, bird's-eye binary threshold
    '''
    img = imgx
    imghalf = int(img.shape[0] * 0.5)
    imglength = img.shape[0]
    hist = np.sum(img[imghalf:, :], axis=0)
    indexvec = find_peaks_cwt(hist, np.arange(1, 550))
    indexl = indexvec[0]
    indexr = indexvec[-1]
    lanebase = [(indexl, imglength), (indexr, imglength)]
    return lanebase

def x_corr(ken_volt, norm_volts):
    """Finds correlation peaks by pattern matching the selected kernel
    against the entire signal.

    :param array ken_volt: voltage array for selected kernel in mV
    :param array norm_volts: normalized filtered signal (dimensionless)
    :return list corr_max: indices of correlation peaks above threshold
    :return array corr: resultant array from correlation
    """
    from numpy import correlate, arange
    from scipy import signal

    corr = correlate(ken_volt, norm_volts, "full")
    threshold = 0.6 * max(corr)
    all_peak_ind = signal.find_peaks_cwt(corr, arange(1, 10))
    corr_peak = [corr[int(i)] for i in all_peak_ind]
    corr_max = [i for i, x in enumerate(corr_peak) if x >= threshold]
    return corr_max, corr

def doCompute():
    zw = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
    width = np.arange(1, zw)
    while True:
        xa.doInput()
        data = xa.Input['Input'][0, 0, :]
        #
        # Find signal peaks
        peakidx = signal.find_peaks_cwt(data, width)
        out = np.zeros(data.shape)
        out[peakidx] = data[peakidx]
        xa.Output['Find Peaks'] = out
        #
        # Output
        xa.doOutput()

def peakDetect(t, v):
    """
    Detects R wave peaks from processed ECG data.

    This function identifies R wave peaks using the continuous wavelet
    transform.

    Args:
        t (list): List of ECG time floats
        v (list): List of processed ECG voltage floats.

    Returns:
        peakind (list): List of integer indices corresponding to peaks in
            input lists.
    """
    peakind = signal.find_peaks_cwt(v, np.arange(20, 40, .1))
    return peakind

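# Minimal sanity check for peakDetect with a synthetic "ECG": a 1 Hz train of
# smooth spikes sampled at 100 Hz, so R-like peaks land every 100 samples.
# The 20-40 sample width range in peakDetect matches this spike size only by
# construction here.
import numpy as np
from scipy import signal

t = np.arange(0, 10, 0.01)               # 100 Hz sampling
v = np.zeros_like(t)
v[50::100] = 1.0                          # one spike per second
v = np.convolve(v, signal.windows.hann(31), mode='same')
print(peakDetect(list(t), list(v)))       # expect indices roughly 100 apart
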
def number_cwt_peaks(x, n):
    """
    This feature calculator searches for different peaks in x. To do so, x is
    smoothed by a ricker wavelet for widths ranging from 1 to n. This feature
    calculator returns the number of peaks that occur at enough width scales
    and with sufficiently high Signal-to-Noise-Ratio (SNR).

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param n: maximum width to consider
    :type n: int
    :return: the value of this feature
    :return type: int
    """
    return len(find_peaks_cwt(vector=x, widths=np.array(range(1, n + 1)),
                              wavelet=ricker))

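# Example call for number_cwt_peaks; the series is synthetic and n=4 is an
# arbitrary maximum wavelet width.
import numpy as np
import pandas as pd
from scipy.signal import find_peaks_cwt, ricker

x = pd.Series(np.sin(np.linspace(0, 4 * np.pi, 200)))
print(number_cwt_peaks(x, n=4))  # the two sine crests, typically 2
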
def DatosyFourier(datos_segundos, amplitud):
    total = len(amplitud)
    tiempo = total / datos_segundos
    tiempo_x = np.linspace(0, tiempo, total)

    # The Fourier transform is applied here
    f = abs(np.fft.fft(amplitud))
    freq = abs(np.fft.fftfreq(len(amplitud), d=tiempo_x[1] - tiempo_x[0]))

    indexes = find_peaks_cwt(f, np.arange(2, 10))
    freqBus = freq[indexes]
    frecuenciasFinales = freqBus[:2]
    Numero = DeterminarNumero(frecuenciasFinales[0], frecuenciasFinales[1])
    return Numero

def wpeaks(vector):
    """
    This is the function that will be mapped by multiprocess. This is a
    wrapper around the scipy function. It uses a parameter - wavelet_widths
    that is configured outside this function.

    Parameters
    ----------
    vector : 1D numpy array
        Feature vector containing peaks

    Returns
    -------
    peak_indices : list
        List of indices of peaks within the prescribed peak widths
    """
    # wavelet_widths and kwargs are supplied by the enclosing scope
    peak_indices = find_peaks_cwt(np.abs(vector), wavelet_widths, **kwargs)
    return peak_indices

def find_peaks(x, y, width=5, threshold=5, limit=20):
    """
    Find peaks in the active data set using continuous wavelet transformation

    Parameters
    ----------
    width : float (default=5)
        estimate of peak size in x data units
    threshold : float (default=5)
        min percent of max to count as a peak
        (e.g. 5 = only peaks above 5 percent reported)
    limit : int
        max limit of peaks to report (sorted by intensity)

    Returns
    -------
    peak_pos : list
        indices associated with peak positions

    Notes
    -----
    Do I really want to include x data here? Should I report x-positions in
    terms of indices or in terms of x positions?
    """
    # scale factor to remove units from x data
    xscale = len(x) / (max(x) - min(x))

    lower = width * xscale * 0.75
    upper = width * xscale * 1.25

    peak_pos = signal.find_peaks_cwt(y, np.arange(lower, upper))

    data_max = np.max(y)
    # remove peaks that are not above the threshold.
    peak_pos = [i for i in peak_pos if (y[i] / data_max) > (threshold / 100)]

    # only use the most intense peaks: zip the two lists together with the
    # y-values first and sort by them (descending)
    peak_pos = [yval for (index, yval)
                in sorted(zip(y[peak_pos], peak_pos), reverse=True)]
    peak_pos = sorted(peak_pos[0:limit])

    return peak_pos

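# Usage sketch for the width/threshold/limit variant above; the x units are
# arbitrary and the peak shapes are synthetic.
import numpy as np
from scipy import signal

x = np.linspace(0, 100, 2000)
y = (np.exp(-(x - 25.)**2 / 50.) + 0.3 * np.exp(-(x - 60.)**2 / 50.)
     + 0.01 * np.random.randn(2000))
# look for roughly 5-unit-wide peaks, keep anything above 5% of the maximum
print(find_peaks(x, y, width=5, threshold=5, limit=20))
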
def histogram_base_points(lanes, min_peak=25.0, edge_percentage=0.9):
    hist = np.sum(lanes[int(lanes.shape[0] * 0.5):, :], axis=0)
    idx = find_peaks_cwt(hist, [100], max_distances=[100], noise_perc=50)

    # Doesn't make sense if there are fewer than two lanes
    if len(idx) < 2:
        return None

    # Avoid edges
    idx = [i for i in idx
           if i > lanes.shape[1] * (1 - edge_percentage)
           and i < lanes.shape[1] * edge_percentage
           and max(hist[i - 50:i + 50]) > min_peak]

    return [min(idx), max(idx)]

def cwt_average_dist_between(x: np.ndarray):
    try:
        peaks, _ = find_peaks(x)
        if len(peaks) > 1:
            peak_width, _, _, _ = peak_widths(x, peaks)
            peaks_cwt_ = find_peaks_cwt(x, peak_width)
            if len(peaks_cwt_) > 1:
                out = np.mean(np.diff(peaks_cwt_))
            else:
                out = 0
        else:
            out = 0
    except ValueError:
        out = np.nan
    return out

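# Quick check of cwt_average_dist_between on an evenly spaced pulse train;
# the signal is synthetic and the expected result is near the 50-sample spacing.
import numpy as np
from scipy.signal import find_peaks, peak_widths, find_peaks_cwt

sig = np.zeros(500)
sig[25::50] = 1.0
sig = np.convolve(sig, np.hanning(11), mode='same')
print(cwt_average_dist_between(sig))  # expect a value close to 50
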