def process(fname):
    Fs, x = wavfile.read(fname)
    a = fname.split(".wav")
    b = a[0].split("cw")
    sys.stdout.write(b[1])
    sys.stdout.write(",")

    # find frequency peaks of high-volume CW signals
    if fft_scan:
        f, s = periodogram(x, Fs, 'blackman', 4096, 'linear', False,
                           scaling='spectrum')
        # download peakdetect from
        # https://gist.github.com/endolith/250860
        from peakdetect import peakdet
        threshold = max(s) * 0.4  # only peaks at 0.4 ... 1.0 of the max value are included
        maxtab, mintab = peakdet(abs(s[0:len(s) // 2 - 1]), threshold,
                                 f[0:len(f) // 2 - 1])
        if plotter:
            plt.plot(f[0:len(f) // 2 - 1], abs(s[0:len(s) // 2 - 1]), 'g-')
            print(maxtab)
            plt.scatter(maxtab[:, 0], maxtab[:, 1], color='blue')
            plt.show()

    # process all CW stations louder than the threshold
    if fft_scan:
        for freq in maxtab[:, 0]:
            print("\nfreq:%5.2f" % freq)
            demodulate(x, Fs, freq)
    else:
        demodulate(x, Fs, MORSE_FREQUENCY)
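# Every snippet in this collection calls `peakdet` from the endolith gist
# cited above (https://gist.github.com/endolith/250860). For reference, the
# function below is a minimal sketch of the contract these snippets assume,
# paraphrased from the gist: scan `v` once, alternating between hunting for
# a maximum and a minimum, and confirm a turning point only once the signal
# has retreated from it by at least `delta`. Rows of the returned arrays are
# (position, value) pairs, with positions taken from `x` when given and from
# the sample index otherwise.
import numpy as np

def peakdet_sketch(v, delta, x=None):
    """Minimal reimplementation of the gist's peakdet contract."""
    if x is None:
        x = np.arange(len(v))
    maxtab, mintab = [], []
    mn, mx = np.inf, -np.inf
    mnpos = mxpos = np.nan
    lookformax = True
    for pos, this in zip(x, v):
        if this > mx:
            mx, mxpos = this, pos
        if this < mn:
            mn, mnpos = this, pos
        if lookformax and this < mx - delta:
            maxtab.append((mxpos, mx))   # confirmed local maximum
            mn, mnpos, lookformax = this, pos, False
        elif not lookformax and this > mn + delta:
            mintab.append((mnpos, mn))   # confirmed local minimum
            mx, mxpos, lookformax = this, pos, True
    return np.array(maxtab), np.array(mintab)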
def findPeaksAndTroughs(self, ydataset, delta, xdataset=None):
    '''Returns a list of peaks and troughs as tuples of
    (peak_position, peak_value). If no x data set is provided,
    tuples of (peak_index, peak_value) are returned instead.'''
    if xdataset is not None:
        peaks, troughs = peakdet(ydataset[:], delta, xdataset[:])
    else:
        peaks, troughs = peakdet(ydataset[:], delta)
    return peaks, troughs
def get_actpeaks(nn, thresh=30):
    ch = 0
    act_peaks = []
    for gr in range(nn.n_ass):
        r = nn.mon_rate_Mg[ch][gr].smooth_rate(width=2.0 * ms)
        act_peaks.append(peakdet(r, thresh)[0])
    return act_peaks
def findPeaksAndTroughs(self, ydataset, delta, xdataset=None):
    '''Returns a list of peaks and troughs as tuples of
    (peak_position, peak_value). If no x data set is provided,
    tuples of (peak_index, peak_value) are returned instead.'''
    if xdataset is not None:
        xdataset = dnp.asarray(xdataset)
    return peakdet(dnp.asarray(ydataset), delta, xdataset)
def find_peak(Fs, signal):
    """Find the signal frequency and maximum value."""
    f, s = periodogram(signal, Fs, 'blackman', 1024 * 32, 'linear', False,
                       scaling='spectrum')
    threshold = max(s) * 0.9  # only peaks at 0.9 ... 1.0 of the max value are included
    maxtab, mintab = peakdet(abs(s[0:int(len(s) / 2 - 1)]), threshold,
                             f[0:int(len(f) / 2 - 1)])
    return maxtab, f, s
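# Hypothetical smoke test for find_peak() above (the values here are
# illustrative, not from the source): a pure 600 Hz tone sampled at 8 kHz
# should yield one dominant spectral peak near 600 Hz. Assumes periodogram
# and peakdet are importable as in the surrounding snippets.
import numpy as np

Fs = 8000
t = np.arange(Fs) / Fs                  # one second of samples
tone = np.sin(2 * np.pi * 600.0 * t)
maxtab, f, s = find_peak(Fs, tone)
print(maxtab[0, 0])                     # expected: approximately 600.0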
def get_peaks(data, delta=0.085, damping=True, RATE=8000, CHUNK=1024):
    '''
    :param data: spectrum data
    :param delta: see peakdetect.py
    :param damping: drop a point if its amplitude is below the threshold
    :return: peaks as an array of points (freq; amplitude)
    '''
    maxs, mins = peakdetect.peakdet(data, delta)  # 0.035

    # Convert tap numbers into frequencies
    if len(maxs) > 0:
        for i in range(len(maxs)):
            maxs[i][0] *= RATE / CHUNK * 0.8
            # maxs[0:len(maxs)][0] *= RATE/CHUNK
        maxs = maxs[:len(maxs) // 2]  # keep the first part of the vector

    # Delete peaks lower than the threshold
    while damping:
        damping = False
        for i in range(len(maxs)):
            if maxs[i][1] < 0.7:  # 0.5
                maxs = np.delete(maxs, i, 0)
                damping = True
                break

    # Print peaks
    # if len(maxs) > 0:
    #     print(list(maxs))
    return maxs
def computeRidgeFrequency(image, allowed_range=None):
    length = np.min(image.shape)

    # Compute the FFT of the image using a Hann window
    kernel = hannWin2D(image.shape)
    img = image * kernel
    img = (img - img.mean()) / img.std()
    img = np.fft.fftshift(np.absolute(np.fft.fft2(img)))

    # Convert the image to polar representation
    img = cart2pol(img, 32)

    # Sum (L2-norm) along the angle axis to get the energy of each circle
    circle_energy = np.sqrt(np.sum(img**2, axis=1))

    # Suppress low-order terms
    if allowed_range is not None:
        radii = np.arange(0, len(circle_energy))
        radii_upper = radii < length / allowed_range[0]
        radii_lower = radii > length / allowed_range[1]
        radii_nallowed = np.logical_not(
            np.logical_and(radii_upper, radii_lower))
        radii = radii[radii_nallowed]
        circle_energy[radii] = circle_energy.min()

    # Find the most energetic circle
    circle_logen = np.log(circle_energy)
    circle_logen = scipy.ndimage.filters.gaussian_filter(circle_logen,
                                                         sigma=2,
                                                         mode="nearest")
    peakind, _ = peakdet(circle_logen, circle_logen.std() / 2)
    if len(peakind) == 0:
        return 10
    max_peak = np.argmax(peakind[:, 1])
    if peakind[max_peak, 0] == 0:
        return 10
    return length / peakind[max_peak, 0]
def threshold_data(data, threshold, noise, other_data):
    # `idx`, `stack`, `czpro`, `colors`, `image_plot`, `firing_plot_list`,
    # `seqdf`, `peak_series` and `min_series` are expected to come from the
    # enclosing scope.
    intensity_high_enough = data > threshold
    intensity_greater_than_other = data > other_data[idx - 1]
    firing_points = np.where(intensity_high_enough
                             & intensity_greater_than_other)[0]
    boundary_points = np.diff(firing_points)
    boundary_idxs = list(np.concatenate(np.argwhere(boundary_points)))
    firing_bounds = list(zip(boundary_idxs[::2], boundary_idxs[1::2]))
    for start, end in firing_bounds:
        # sp = start_points[idx]
        # ep = end_points[idx] + 1
        # end += 1
        x_values = np.arange(start, end)
        intensities = data[start:end]
        curve_pen = pg.mkPen(colors[idx], width=2)
        curve = pg.PlotDataItem(x=x_values, y=intensities, pen=curve_pen)
        firing_plot_list.append(curve)
        # Plot the curves highlighting each base
        image_plot.getRoiPlot().addItem(curve)
        try:
            peaks, mins = peakdet(v=intensities, delta=noise, x=x_values)
            if len(peaks) == 0 or len(mins) == 0:
                peaks = np.NAN
                substack = np.mean(stack[start:end], 0)
                call = get_call(substack, czpro, idx)
                seqdf = seqdf.append(pd.DataFrame({'base': [call],
                                                   'times': [start]}),
                                     ignore_index=True)
            else:
                # point = pg.PlotDataItem(maxes, pen=None, symbol='o', symbolBrush='g')
                # self.p1.getRoiPlot().addItem(point)
                # self.firingplotlist.append(point)
                # point = pg.PlotDataItem(mins, pen=None, symbol='o', symbolBrush='r')
                # self.p1.getRoiPlot().addItem(point)
                # self.firingplotlist.append(point)
                # (renamed the loop variable so it does not shadow `idx`)
                for i, x in enumerate(peaks):
                    if i == 0:
                        ssp = start
                        sep = int(mins[i][0])
                    elif i == len(peaks) - 1:
                        ssp = int(mins[i - 1][0])
                        sep = end
                    else:
                        ssp = int(mins[i - 1][0])
                        sep = int(mins[i][0])
                    substack = np.mean(stack[ssp:sep + 1], 0)
                    call = get_call(substack, czpro, idx)
                    seqdf = seqdf.append(pd.DataFrame({'base': [call],
                                                       'times': [ssp]}),
                                         ignore_index=True)
                peak_series = peak_series.append(pd.Series([peaks]))
                min_series = min_series.append(pd.Series([mins]))
        except Exception as e:
            raise ValueError from e
def removeClutter(self, clutRatio=0.5, dist=10):
    surfclut = self.firstReturn(self.clutim)
    bclutim = np.zeros_like(self.img)
    for col in range(self.clutim.shape[1]):
        iprofile = self.clutim[:, col]
        bgnoise = np.max(iprofile)
        peakmx, peakmn = pd.peakdet(iprofile, 1e-26)
        if peakmx.shape[0] <= 0:
            continue
        peakind = peakmx[:, 0].astype(int)
        ind = np.where(peakmx[:, 1] > bgnoise * clutRatio)[0]
        peakind = peakind[ind]
        # peakind = np.where(iprofile > bgnoise * 0.5)[0].astype(int)
        bclutim[peakind, col] = 1

    plt.figure()
    xy = np.where(bclutim == 1)
    x = xy[0]
    y = xy[1]
    plt.scatter(y, x - 1300, color='r', marker='.', zorder=1)
    plt.xlim(0, self.clutim.shape[1])
    plt.imshow(self.clutim[1300:3600, :], zorder=0)
    filename = self.outpath + '/s_' + self.trackID + '_rmclut.png'
    plt.savefig(filename)
    plt.close()

    for col in range(self.peakim.shape[1]):
        deltaH = int(self.surfecho[col] - surfclut[col])
        peakind = np.where(self.peakim[:, col] != 0)[0].astype(int)
        tmp = peakind
        peakind = np.delete(tmp, np.where(tmp - deltaH >= self.peakim.shape[0])[0])
        if len(peakind) <= 0 or abs(deltaH) > 15:
            continue
        if abs(deltaH) > 0:
            self.peakim[peakind - deltaH, col] = self.peakim[peakind, col]
            self.peakim[peakind, col] = 0

    for col in range(self.peakim.shape[1]):
        self.peakim[1:self.surfecho[col] + 1, col] = 0
        clind = np.where(bclutim[:, col] > 0)[0]
        pkind = np.where(self.peakim[:, col] > 0)[0]
        if len(pkind) <= 0:
            self.peakim[self.surfind[col], col] = 1
            continue
        # self.peakim[pkind[0], col] = 0
        for k in clind:
            anyind = np.where(abs(pkind - k) < dist)[0]
            if len(anyind) <= 0:
                continue
            self.peakim[pkind[anyind], col] = 0
        self.peakim[self.surfind[col], col] = 1
def get_peaks(rates, cmpf, thresh):
    '''Returns peaks above some threshold, given the rates of multiple groups.'''
    peaks = []
    for gr in range(len(rates)):
        r = numpy.interp(numpy.arange(len(rates[gr]) * cmpf) / cmpf,
                         numpy.arange(len(rates[gr])), rates[gr])
        peaks.append(peakdet(r, thresh)[0])
    return peaks
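# get_peaks() above upsamples each rate trace by a factor of cmpf via linear
# interpolation before peak detection. A standalone illustration of that
# resampling step (the values here are made up):
import numpy as np

rates_gr = np.array([0.0, 1.0, 0.0, 2.0])
cmpf = 4  # hypothetical upsampling factor: 4 output samples per input one
upsampled = np.interp(np.arange(len(rates_gr) * cmpf) / cmpf,
                      np.arange(len(rates_gr)), rates_gr)
print(upsampled)  # linear ramps between samples, clamped past the last one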
def peak_windows(data, width=80):
    highs, lows = peakdet(data, 0.75)
    peak_centers = np.array(np.r_[highs[:, 0], lows[:, 0]], dtype=np.uint32)
    windows = np.zeros((peak_centers.shape[0], width))
    for ind, center in enumerate(peak_centers):
        # integer division keeps the slice bounds integral on Python 3
        windows[ind, :] = data[center - width // 2:center + width // 2]
    return windows
def peak():
    global voltage0, voltage1, timeval
    # change the 2nd argument to fine-tune peak detection
    maxtab0, mintab0 = peakdetect.peakdet(voltage0, 0.1, timeval)
    maxtab1, mintab1 = peakdetect.peakdet(voltage1, 0.1, timeval)
    filelog(maxtab0, 'Maximum', 1, 'w')
    filelog(mintab0, 'Minimum', 1, 'a')
    filelog(maxtab1, 'Maximum', 2, 'a')
    filelog(mintab1, 'Minimum', 2, 'a')
    plt.subplot(2, 1, 1)
    plt.plot(timeval, voltage0)
    plt.scatter(array(maxtab0)[:, 0], array(maxtab0)[:, 1], color='blue')
    plt.scatter(array(mintab0)[:, 0], array(mintab0)[:, 1], color='red')
    plt.subplot(2, 1, 2)
    plt.plot(timeval, voltage1)
    plt.scatter(array(maxtab1)[:, 0], array(maxtab1)[:, 1], color='blue')
    plt.scatter(array(mintab1)[:, 0], array(mintab1)[:, 1], color='red')
    os.startfile('timelog.csv')
    plt.show()
def find_peaks(x, y, delta=.004):
    """Apply the peak detection algorithm; see peakdet's source for details."""
    print('x', len(x))
    print('y', len(y))
    print('delta', delta)
    mx, mn = peakdet(y, delta, x=x)
    return mx.T, mn.T
def detect_optimal_knn(arr, rad_lst=[0.1, 0.2, 0.3], sample_size=10000):
    """
    Detects optimal values for knn in order to facilitate material
    separation.

    Parameters
    ----------
    arr: array
        Set of 3D points.
    rad_lst: list
        Set of radius values used to sample neighborhoods. These select
        points from which a distribution of neighboring-point counts is
        calculated over the point cloud.
    sample_size: int
        Number of points in arr to process in order to generate a
        distribution.

    Returns
    -------
    knn_lst: list
        Set of k-nearest neighbors values.

    """
    # Generating sample indices.
    sids = np.random.choice(np.arange(arr.shape[0]), sample_size,
                            replace=False)

    # Obtaining nearest neighbors' indices and distances for sampled points.
    # This is done just once, with the largest radius in rad_lst. Later on,
    # indices can be subsampled by limiting their distances to a smaller
    # radius.
    dist, ids = set_nbrs_rad(arr, arr[sids], np.max(rad_lst), True)

    # Initializing empty list to store knn values.
    knn_lst = []

    # Looping over each radius value.
    for r in rad_lst:
        # Counting number of points inside radius r.
        n_pts = [len(i[d <= r]) for i, d in zip(ids, dist)]
        # Binning n_pts into a histogram.
        y, x = np.histogram(n_pts)
        # Detecting peaks of accumulated points from n_pts.
        maxtab, mintab = peakdet(y, 100)
        maxtab = np.array(maxtab)
        # Appending knn values relative to the peaks detected in n_pts.
        # peakdet returns float positions, so cast before indexing.
        knn_lst.append(x[maxtab[:, 0].astype(int)])

    # Flattening nested lists into a final list of knn values.
    knn_lst = [i for j in knn_lst for i in j]

    return knn_lst
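# The knn detection above hinges on two steps that can be demonstrated on
# their own: histogram the per-point neighbor counts, then run peakdet over
# the bin counts. The data below is made up (Poisson-distributed neighbor
# counts); `peakdet` is assumed importable from the endolith gist.
import numpy as np
from peakdetect import peakdet

n_pts = np.random.poisson(lam=200, size=10000)  # hypothetical neighbor counts
y, x = np.histogram(n_pts)                      # y: counts, x: bin edges
maxtab, mintab = peakdet(y, 100)
maxtab = np.array(maxtab)
knn = x[maxtab[:, 0].astype(int)]               # bin edges at the peaks
print(knn)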
def PeakDetect(data):
    '''
    This function uses the accompanying program peakdetect.py to find peaks
    in the transformed data and saves them as an array of arrays, where each
    element array corresponds to one transformed signal.
    '''
    peakData = []
    for i in range(1, len(data[0, :])):
        Peaks, Lows = pd.peakdet(data[:, i], (max(data[:, i]) / 15),
                                 data[:, 0])
        peakData.append(Peaks)
    return peakData
def perfind2():
    P = 200.
    tbase = 1000.
    nf = 5000
    s2n = logspace(log10(3), log10(15), 5)
    print("S/N sig-noise noise-noise")
    for s in s2n:
        # Generate a lightcurve.
        f, t = keptoy.lightcurve(P=P, tbase=tbase, s2n=s)
        o = blsw.blswrap(t, f, nf=nf, fmax=1 / 50.)

        # Subtract off the trend.
        o['p'] -= median_filter(o['p'], size=200)

        # Find the highest peak.
        mxid = argmax(o['p'])
        mxpk = o['p'][mxid]

        # Find all the peaks.
        mxt, mnt = peakdet(o['p'], delta=1e-3 * mxpk, x=o['parr'])
        mxt = array(mxt)

        # Restrict attention to the highest 100 peaks, leaving off the top 10.
        t1id = where((mxt[::, 1] > sort(mxt[::, 1])[-100]) &
                     (mxt[::, 1] < sort(mxt[::, 1])[-10]))

        fig = plt.gcf()
        fig.clf()
        ax = fig.add_subplot(111)
        ax.plot(o['parr'], o['p'], label='Signal')
        ax.scatter(mxt[t1id, 0], mxt[t1id, 1], label='Tier 1')

        # Typical values of the highest 100 peaks.
        hair = median(mxt[t1id, 1])
        left = min(o['parr'])
        right = max(o['parr'])
        ax.hlines(hair, left, right)

        if mxpk > 3 * hair:
            print("Period is %.2f" % (o['parr'][mxid]))
        else:
            print("No signal")

        ax.legend()
        plt.show()
def find_peak(fname):
    """Find the signal frequency and maximum value."""
    #print("find_peak", fname)
    Fs, x = wavfile.read(fname)
    f, s = periodogram(x, Fs, 'blackman', 8192, 'linear', False,
                       scaling='spectrum')
    threshold = max(s) * 0.8  # only peaks at 0.8 ... 1.0 of the max value are included
    maxtab, mintab = peakdet(abs(s[0:int(len(s) / 2 - 1)]), threshold,
                             f[0:int(len(f) / 2 - 1)])
    try:
        val = maxtab[0, 0]
    except IndexError:
        print("Error: {}".format(maxtab))
        val = 600.
    return val
def peakimCWT(self, scales, bgSkip=20):
    # CWT-based peak detection
    if self.surfecho is None:
        return
    if self.filtim is not None:
        im = self.filtim
    elif self.img is not None:
        im = self.img
    else:
        return
    self.peakim = np.zeros_like(im)
    num = np.zeros((len(scales), im.shape[1]))
    for col in range(im.shape[1]):
        # direct peak detection after strong logGabor filtering is not working
        #peakmx, peakmn = pd.peakdet(im[:,col], 0.000001)
        #peakind = peakmx[:,0].astype(int)
        #self.peakim[peakind, col] = 1
        cwtMatr = signal.cwt(im[:, col], signal.ricker, scales)
        idxStart = int(self.surfecho[col])
        for r in range(cwtMatr.shape[0]):
            cwtRow = cwtMatr[r, :]
            # take the first pixels up to surfecho to be the background
            bgSig = cwtRow[1:idxStart - bgSkip]
            bgmax = max(bgSig) if len(bgSig) > 0 else 0
            peakmx, peakmn = pd.peakdet(cwtRow, 0.000001)
            if peakmx.shape[0] <= 0:
                continue
            peakind = peakmx[:, 0].astype(int)
            ind = np.where(peakmx[:, 1] > bgmax)[0]
            peakind = peakind[ind]
            #if col == 0 and r == 0:
            #    plt.plot(im[:,col])
            #    plt.plot(peakind, im[peakind,col], 'ro')
            #    plt.show()
            self.peakim[peakind, col] = self.peakim[peakind, col] + cwtRow[peakind]
            #num[peakind, col] = num[peakind, col] + 1
            num[r, col] = len(peakind)
    return num
def Calculate_cycles(self):
    Dankle = list()
    PersonFrames = self.Person.Fdata
    for x in range(len(PersonFrames)):
        each = PersonFrames[x].CompleteFrame
        distance = self.Horizontal_distance(each, 16, 17)
        Dankle.append(distance)
    x = range(0, len(Dankle), 1)
    #print(max(Dankle))
    Norm = self.Moving_Average(Dankle)
    maximum, minimum = peakdet(Norm, 0.2)
    if len(maximum) < 3:
        print("Not Enough Frames")
    else:
        cycle_frames = maximum[:, 0]
        self.Calculate_Half_Cycle(cycle_frames)
def audioRead(self):
    buffer_bytes = self.audio_buffer.readAll()
    if buffer_bytes:
        buffer_bytes = buffer_bytes[:self.audio_nsamples * self.audio_bytes]  # truncate
        data = np.frombuffer(buffer_bytes, dtype=np.single)
        if max(data) > 0:
            #print(max(data))
            data /= max(data)
        self.sc_time.new_data(data)
        nb_samples = len(buffer_bytes) // self.audio_bytes
        self.peak_signal[self.peak_signal_index:self.peak_signal_index + nb_samples] = data
        self.peak_signal_index += nb_samples
        if self.peak_signal_index > self.nfft_peak:
            f, s = periodogram(self.peak_signal, self.audio_rate, 'blackman',
                               self.nfft_peak, 'linear', False,
                               scaling='spectrum')
            threshold = max(s) * 0.9
            if threshold > self.thr:
                maxtab, mintab = peakdet(abs(s[0:int(len(s) / 2 - 1)]),
                                         threshold,
                                         f[0:int(len(f) / 2 - 1)])
                tone = maxtab[0, 0]
                #print(f'tone: {tone} thr: {(10.0 * np.log10(threshold)):.2f} dB')
                self.sc_peak.new_data(f, s, maxtab, tone)
                nside_bins = 1
                f, t, img = specimg(self.audio_rate,
                                    self.peak_signal[:self.nfft_peak], tone,
                                    self.nfft, self.noverlap, nside_bins)
                print(t.shape, f)
                if len(f) != 0:
                    img_line = np.sum(img, axis=0)
                    img_line /= max(img_line)
                    self.test_line(img_line, 0.75)
                    self.sc_tenv.new_data(img_line, 50)
                    self.sc_zenv.new_data(img_line[:50])
            self.peak_signal = np.roll(self.peak_signal, self.nfft_peak, axis=0)
            self.peak_signal_index -= self.nfft_peak
def findSubPeaks(red, blue, redstart, redend, bluestart, blueend,
                 redsubthresh, bluesubthresh):
    pulsecount = 0
    df = pd.DataFrame({"ident": [], 'stimes': [], 'etimes': [],
                       'peaks': [], 'mins': []})
    df['peaks'] = df['peaks'].astype(object)
    df['mins'] = df['mins'].astype(object)
    for i, x in enumerate([[redstart, redend], [bluestart, blueend]]):
        df = df.append(pd.DataFrame({"ident": [i] * len(x[0]),
                                     'stimes': x[0],
                                     'etimes': x[1]}),
                       ignore_index=True)
    df = df.sort_values(by='stimes').reset_index()
    for i, x in enumerate(df.ident):
        sp = int(df.stimes[i])
        ep = int(df.etimes[i])
        if x == 0:
            intensity = red
            noise = redsubthresh
        else:
            intensity = blue
            noise = bluesubthresh
        peaks, mins = peakdet(v=intensity[sp:ep], delta=noise,
                              x=np.arange(sp, ep))
        if len(peaks) == 0 or len(mins) == 0:
            pulsecount += 1
        else:
            # DataFrame.set_value was removed in newer pandas; .at is the
            # equivalent scalar setter
            df.at[i, 'peaks'] = peaks.astype(object)
            df.at[i, 'mins'] = mins.astype(object)
            pulsecount += 1 + len(peaks)
    print(pulsecount)
    return df
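# The sub-peak splitting rule used above and in the base-calling snippets
# below (first segment runs from the burst start to the first minimum, the
# last from the final minimum to the burst end) is easiest to see on a toy
# burst; assumes peakdet from the endolith gist.
import numpy as np
from peakdetect import peakdet

burst = np.array([0., 3., 1., 4., 0.])
peaks, mins = peakdet(v=burst, delta=0.5, x=np.arange(len(burst)))
print(peaks)  # maxima at x=1 (3.0) and x=3 (4.0)
print(mins)   # the minimum at x=2 (1.0) is the split point between them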
def track_freqs(self):
    if self.bouts is not None:
        # initiate bout maxima & minima coord lists
        self.peak_maxes_y = []
        self.peak_maxes_x = []
        self.peak_mins_y = []
        self.peak_mins_x = []

        # initiate instantaneous frequency array
        self.tail_frequencies = np.zeros(self.tail_angle_arrays[self.current_tracking_num].shape[0])

        for i in range(self.bouts.shape[0]):
            # get local maxima & minima
            peak_max, peak_min = peakdetect.peakdet(
                self.tail_end_angle_arrays[self.current_tracking_num][self.current_crop][self.bouts[i, 0]:self.bouts[i, 1]],
                0.02)

            # change local coordinates (relative to the start of the bout)
            # to global coordinates
            peak_max[:, 0] += self.bouts[i, 0]
            peak_min[:, 0] += self.bouts[i, 0]

            # add to the bout maxima & minima coord lists
            self.peak_maxes_y += list(peak_max[:, 1])
            self.peak_maxes_x += list(peak_max[:, 0])
            self.peak_mins_y += list(peak_min[:, 1])
            self.peak_mins_x += list(peak_min[:, 0])

        # calculate instantaneous frequencies (written into the array
        # initialized above)
        for i in range(len(self.peak_maxes_x) - 1):
            self.tail_frequencies[self.peak_maxes_x[i]:self.peak_maxes_x[i + 1]] = \
                1.0 / (self.peak_maxes_x[i + 1] - self.peak_maxes_x[i])

        extra_tracking = {'bouts': self.bouts,
                          'peak_points': self.peak_points,
                          'valley_points': self.valley_points,
                          'frequencies': self.tail_frequencies}

        # update plot
        # self.smoothed_deriv_checkbox.setChecked(False)
        # self.tail_canvas.plot_tail_angle_array(self.tail_end_angle_array[self.current_crop], self.bouts, self.peak_maxes_y, self.peak_maxes_x, self.peak_mins_y, self.peak_mins_x, self.freqs, keep_limits=True)

        # update plot
        self.analysis_window.update_plot(self.plot_array,
                                         self.current_plot_type,
                                         extra_tracking=extra_tracking,
                                         keep_limits=True)
def _power_dispersion_scalar(self, inten_spec, uwb_scan, uwb_pcd):
    '''
    This scalar is only applicable to the R1 region. Its lowest value is 0.5.

    Input:
    - uwb_pcd, dtype: pointcloud. Pointcloud representing the uwb mask in
      the global frame with an appropriate channel for encoding. The region
      channel shall be set with appropriate values.
    - inten_spec, dtype: dictionary. Indexed by the range_bin index,
      containing [mean_return, std_return].
    - uwb_scan, dtype: struct/UWBScan.msg. Contains a list with intensity
      values for each bin, starting from zero.

    Output:
    pointcloud with the original channels plus a channel holding the
    _power_dispersion_scalar value, or -1 where the scalar is unknown.
    '''
    # YH used a Gaussian kernel as a scalar
    kern = np.array(signal.get_window(('gaussian', 4), 9))

    # determine the peaks of the intensity spectrum
    intensity = list(uwb_scan.intensity)
    [max_pks, values] = peakdet(intensity, 2)
    pk_bins = [pair[0] for pair in max_pks]

    points = uwb_pcd.points
    region = uwb_pcd.channels[5].values
    pow_dip = uwb_pcd.channels[7].values
    ranges = uwb_pcd.channels[0].values
    for i, point in enumerate(points):
        if i > 0 and region[i] == 1:
            range_bin = int(m.floor(ranges[i] / self.range_res))
            dist2peak = self._distance_to_peak(pk_bins, range_bin)
            # the kernel weights are indexed by distance to the peak:
            # index 0 applies to a pkdist of 4, index 1 to a pkdist of 3, ...
            kern_index = 4 - dist2peak
            pow_dip[i] = kern[kern_index]
        else:
            pow_dip[i] = -1
def findPeaksMultiple(signal, scale, figTitle):
    fs = int(app.getEntry("Sampling Frequency")) or 8000
    highestFreq = fs / 2
    # the smallest quefrency we deem valid; anything before it is likely noise
    lowestQuefrency = 1.0 / highestFreq
    cutOff = app.getEntry('Source Cut Off Frequency') or 170.0
    timePeriod = 1.0 / cutOff
    cutOffIndex = len(scale) - 1
    startIndex = None
    for index, value in enumerate(scale):
        if value >= timePeriod:
            cutOffIndex = index
            break
        if startIndex is None and value >= lowestQuefrency:
            startIndex = index
    new_scale = [1.0 / x for x in scale[startIndex:]]
    peak_tuples = peakdet(signal[startIndex:], 0.0001, new_scale)[0]
    peak_frequencies = [x[0] for x in peak_tuples]
    m = findMultiples(peak_frequencies, True)
    z = sorted(peak_frequencies)
    print(figTitle + '---------')
    print(z[0])
    print(z[-1])
    maximum_index = 0
    max_val = -1
    for val in m:
        if val > 40.0 and val < 110.0:
            print(val, m[val])
            if len(m[val]) > max_val:
                max_val = len(m[val])
                maximum_index = val
    print(maximum_index)
    print(len(m[maximum_index]))
    print(m[maximum_index][-1])
    print(figTitle + '---------\\')
def window(fseq, window_size=1000, moving_size=100):
    WinNum = int((len(fseq) - window_size) / moving_size + 1)
    Feature = np.zeros((WinNum, 6))
    for i in range(WinNum):
        # yield fseq[i*moving_size:i*moving_size+window_size]
        seg = fseq[i * moving_size:i * moving_size + window_size]
        Feature[i, 0] = np.mean(seg)
        Feature[i, 1] = np.std(seg)
        p = peakdetect.peakdet(seg, 60)
        Feature[i, 2] = len(p[0])
        Feature[i, 3] = len(p[1])
        Feature[i, 4] = max(seg)
        a = 0
        for j in range(i * moving_size, i * moving_size + window_size):
            a += fseq[j]**2
        Feature[i, 5] = (np.sum(p[0]**2) + np.sum(p[1]**2)) / a * 100
    return Feature
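# Hypothetical smoke test for window() above: 5000 samples of noise should
# yield (5000 - 1000) / 100 + 1 = 41 sliding windows of 6 features each.
# Assumes the gist's peakdetect module is importable as in the snippet.
import numpy as np

fseq = np.random.randn(5000) * 30
feats = window(fseq, window_size=1000, moving_size=100)
print(feats.shape)  # (41, 6)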
def num_beats(self):
    """Determines the number of beats in the data set

    :return num_beats: number of beats in the data set
    """
    norm_voltage = list(self.voltage - numpy.mean(self.voltage))
    norm_voltage_ref = list(self.voltage_ref - numpy.mean(self.voltage_ref))
    corr_voltage = numpy.convolve(norm_voltage, norm_voltage_ref[144:504],
                                  mode='same')
    voltage_delta = numpy.sqrt(numpy.mean(corr_voltage**2)) * 2.5
    [self.max_corr, min_corr] = peakdet(corr_voltage, voltage_delta,
                                        self.time)
    max_corr_voltage = []
    for i in self.max_corr:
        max_corr_voltage.append(i[0])
    num_beats = len(max_corr_voltage)
    logging.info("Number of beats successfully counted.")
    return num_beats
sns.set_style('whitegrid')

# Choose between the DetectorBank response to a tone, white noise, or both
# noise and tone at an SNR of -4 dB or -15 dB
modes = ['tone', 'noise', 'snr-4', 'snr-15']
mode = modes[0]

file = os.path.join('../Data', 'freqz_400_48000_5e-04_{}.csv'.format(mode))
arr = np.loadtxt(file, delimiter='\t', skiprows=1)

freq = arr[:, 0]
mx = arr[:, 1]
max_db = 20 * np.log10(mx)

maxtab, _ = pk.peakdet(max_db, 15, freq)
peaks = maxtab[:, 0]

for n in range(1, len(peaks)):
    diff = peaks[n] - peaks[n - 1]
    data = (peaks[n - 1], peaks[n], diff)
    print('Difference between {:9.3f} and {:9.3f}: {:8.3f} Hz'.format(*data))

plt.semilogx(freq, max_db, color='dodgerblue')
xtx = 2 * np.logspace(0, 4, num=5)
xlab = ['{:g}'.format(x) for x in xtx]
plt.xticks(xtx, xlab)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Peak amplitude (dB)')
plt.grid(True, which='both')
def segmentvad(feat, amplitude, dist_1, numfrwin, nsh, pflin, fs):
    # Performing segmentation
    win_ind_1 = 0
    win_ind_2 = win_ind_1 + numfrwin
    dim = len(feat[:, 1])  # dimensionality of the feature vectors
    dist = 0
    count = 0
    w1 = np.zeros((dim, numfrwin))
    w2 = np.zeros((dim, numfrwin))
    d = []
    frame_no = []
    num_frame = len(feat[1, :])
    Nw = math.floor(fs * 0.03)
    Nsh = math.floor(fs * nsh)  # 0.010 by default
    frame_index_w1 = 0
    frame_index_w2 = 0 + numfrwin * Nsh
    while win_ind_2 + numfrwin < num_frame:
        # finding the KL divergence between w1 and w2
        w1[:, 0:numfrwin] = feat[:, win_ind_1:win_ind_1 + numfrwin]
        w2[:, 0:numfrwin] = feat[:, win_ind_2:win_ind_2 + numfrwin]
        mean1 = np.mean(w1, 1)
        mean2 = np.mean(w2, 1)
        cov1 = np.diag(np.var(w1, 1))
        cov2 = np.diag(np.var(w2, 1))
        mean1.shape = (1, dim)
        mean2.shape = (1, dim)
        dist1 = np.trace(np.dot(inv(cov2), cov1))
        dist2 = np.dot((mean2 - mean1), inv(cov2))
        mean1.shape = (dim, 1)
        mean2.shape = (dim, 1)
        dist2 = np.dot(dist2, (mean2 - mean1))
        k = dim
        dist3 = np.log(np.linalg.det(cov2) / np.linalg.det(cov1))
        dist = 0.5 * (dist1 + dist2 - k + dist3)
        d.append(dist)
        frame_no.append(frame_index_w2)
        win_ind_1 = win_ind_1 + 1
        win_ind_2 = win_ind_2 + 1
        frame_index_w2 = win_ind_2 * Nsh
        frame_index_w3 = win_ind_1 * Nsh
    d = np.array(d)
    d.shape = (len(d),)
    d = d.tolist()
    frame_no = np.array(frame_no)
    time_stamps = frame_no / fs
    frame_no.shape = (len(frame_no),)
    time_stamps.shape = (len(frame_no),)
    frame_no = frame_no.tolist()
    time_stamps = time_stamps.tolist()
    d1 = np.zeros((numfrwin,))
    d1 = d1.tolist()
    d1.extend(d)

    # Finding the peaks to identify the change points
    b = []
    max1, min2 = peakdet(d1, dist_1)  # max1: maxima peaks; min2: minima peaks
    temp = []
    temp.append(feat)
    siz = max1.size
    if siz == 0:
        # If no change point is found, return the entire feat file as-is
        return b, b, b, b, temp, temp
    time_stamp = []
    for i in range(0, len(max1[:, 0])):
        if max1[i, 1] < amplitude:
            max1[i, 1] = 0
        else:
            time_stamp.append(max1[i, 0] * Nsh / fs)
    frame_stamp = max1[:, 0]
    frame_stamp = frame_stamp.tolist()
    clus = []
    i = 0
    lasfram = len(feat[1, :])

    # segmenting the features at the change points
    Nen = int(frame_stamp[0])
    Nst = 0
    clus.append(feat[:, Nst:Nen])
    while i < len(time_stamp) - 1:
        Nst = int(frame_stamp[i])
        Nen = int(frame_stamp[i + 1])
        clus.append(feat[:, Nst:Nen])
        i = i + 1
    Nst = int(frame_stamp[len(frame_stamp) - 1])
    Nen = lasfram
    clus.append(feat[:, Nst:Nen])

    # performing linear clustering
    counlin = 1
    cluslin = []
    cov_lin = []
    mfcc_lin = []
    clus2 = clus
    ts_lin = []
    fs_lin = []
    while counlin < len(clus2):
        if counlin <= 1:
            bicdist = bicdist_single(clus2[counlin], clus2[counlin - 1], pflin)
            if bicdist < 0:
                clus3 = np.concatenate((clus2[counlin], clus2[counlin - 1]),
                                       axis=1)
                cluslin.append(clus3)
            else:
                cluslin.append(clus2[counlin - 1])
                cluslin.append(clus2[counlin])
                ts_lin.append(time_stamp[counlin - 1])
                fs_lin.append(frame_stamp[counlin - 1])
        else:
            bicdist = bicdist_single(clus2[counlin],
                                     cluslin[len(cluslin) - 1], pflin)
            if bicdist < 0:
                clus3 = np.concatenate((cluslin[len(cluslin) - 1],
                                        clus2[counlin]), axis=1)
                cluslin[len(cluslin) - 1] = clus3
            else:
                cluslin.append(clus2[counlin])
                ts_lin.append(time_stamp[counlin - 1])
                fs_lin.append(frame_stamp[counlin - 1])
        counlin = counlin + 1
    return time_stamp, frame_stamp, ts_lin, fs_lin, clus, cluslin
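# The per-window distance computed in segmentvad is the closed-form KL
# divergence between two Gaussians,
# KL(N1 || N2) = 0.5 * (tr(C2^-1 C1) + (m2-m1)^T C2^-1 (m2-m1) - k
#                        + ln(det C2 / det C1)).
# A standalone sanity check of that formula (the function name is
# illustrative, not from the source):
import numpy as np

def kl_gauss(mean1, cov1, mean2, cov2):
    """KL(N1 || N2) in closed form, as used in segmentvad."""
    k = len(mean1)
    icov2 = np.linalg.inv(cov2)
    diff = (mean2 - mean1).reshape(-1, 1)
    return 0.5 * (np.trace(icov2 @ cov1)
                  + (diff.T @ icov2 @ diff).item()
                  - k
                  + np.log(np.linalg.det(cov2) / np.linalg.det(cov1)))

# identical Gaussians have zero divergence
m0 = np.zeros(3)
c0 = np.eye(3)
print(kl_gauss(m0, c0, m0, c0))  # 0.0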
def analyze_peaks(self, delta, x=None):
    self.peaks, vals_ = peakdetect.peakdet(self.v, delta, x)
def audioRead(self):
    buffer_bytes = self.audio_buffer.readAll()
    if buffer_bytes:
        buffer_bytes = buffer_bytes[:self.audio_nsamples * self.audio_bytes]  # truncate
        data = np.frombuffer(buffer_bytes, dtype=np.single)
        if max(data) > 0:
            #print(type(data), data.shape)
            data /= max(max(data), -min(data))
            # data /= (max(data)/2.0)
            # data[data > 1] = 1
            # data[data < -1] = -1
        self.sc_time.new_data(data)
        nb_samples = len(buffer_bytes) // self.audio_bytes
        self.peak_signal[self.peak_signal_index:self.peak_signal_index + nb_samples] = data
        self.peak_signal_index += nb_samples
        if self.peak_signal_index > self.nfft_peak:
            f, s = periodogram(self.peak_signal, self.audio_rate, 'blackman',
                               self.nfft_peak, 'linear', False,
                               scaling='spectrum')
            threshold = max(s) * 0.9
            if threshold > self.thr:
                self.thr_count = 2
            elif self.thr_count > 0:
                self.thr_count -= 1
            if self.thr_count > 0:
                maxtab, mintab = peakdet(abs(s[0:int(len(s) / 2 - 1)]),
                                         threshold,
                                         f[0:int(len(f) / 2 - 1)])
                tone = maxtab[0, 0]
                #print(f'tone: {tone} thr: {(10.0 * np.log10(threshold)):.2f} dB')
                self.sc_peak.new_data(f, s, maxtab, tone)
                nside_bins = 1
                f, t, img = specimg(self.audio_rate,
                                    self.peak_signal[:self.nfft_peak], tone,
                                    self.nfft, self.noverlap, nside_bins)
                #print(t.shape, f)
                if len(f) != 0:
                    img_line = np.sum(img, axis=0)
                    if threshold > self.thr:
                        # update the scaling factor if a signal is present
                        self.img_norm = max(img_line) / 1.5
                    img_line /= self.img_norm
                    img_line[img_line > 1] = 1
                    if len(img_line) != self.pred_len:
                        self.pred_len = len(img_line)
                        self.sc_pred.set_mp(self.pred_len * 3)
                    self.dataq.put(img_line)
                    #self.test_line(img_line, 0.75)
                    self.sc_tenv.new_data(img_line, 50)
                    self.sc_zenv.new_data(img_line[:50])
            self.peak_signal = np.roll(self.peak_signal, self.nfft_peak, axis=0)
            self.peak_signal_index -= self.nfft_peak
def online_FE(inputfile, featurefile):
    # read input from the two files
    inputraw = pd.read_csv(inputfile)
    tempselected = [line.strip('\n') for line in open(featurefile)]
    selectedfeature = [line.split(':', 1) for line in tempselected]
    featuresetlist = ['mean', 'var', 'skewness', 'kurtosis', 'maximum',
                      'minimum', 'RMS', 'std', 'range', 'iqr',
                      'maxpeak', 'minpeak', 'all']

    # take the selected attributes out for calculating features
    calculist = list()
    for (attr, feature) in selectedfeature:
        for a in inputraw.columns:
            if a.find(attr) > 0 and feature in featuresetlist:
                calculist.append((a, feature))
                break
            elif a == inputraw.columns[-1]:
                print(attr + '_' + feature + " is not a valid feature! "
                      "Either attrname or feature is not a valid one.")
    if not calculist:
        print("No feature calculated!")
        return

    def maxpeak(col):
        maxtab, mintab = peakdetect.peakdet(col, .3)
        return maxtab[:, 1].max()

    def minpeak(col):
        maxtab, mintab = peakdetect.peakdet(col, .3)
        return mintab[:, 1].min()

    # one extractor per feature name, in the same order as featuresetlist
    extractors = {
        'mean': lambda col: col.mean(),
        'var': lambda col: col.var(),
        'skewness': lambda col: col.skew(),
        'kurtosis': lambda col: col.kurt(),
        'maximum': lambda col: col.max(),
        'minimum': lambda col: col.min(),
        'RMS': lambda col: numpy.sqrt(numpy.mean(numpy.square(col))),
        'std': lambda col: col.std(),
        'range': lambda col: col.max() - col.min(),
        'iqr': lambda col: (col.quantile(0.75) - col.quantile(0.25)) / 2,
        'maxpeak': maxpeak,
        'minpeak': minpeak,
    }

    def safe(feature, col):
        # NaN on any failure, matching the original try/except blocks
        try:
            return extractors[feature](col)
        except Exception:
            return float('nan')

    result = list()
    # calculate features
    for (attr, f) in calculist:
        if f == 'all':
            for fl in featuresetlist[:-1]:
                result.append(safe(fl, inputraw[attr]))
        elif f in extractors:
            result.append(safe(f, inputraw[attr]))
        else:
            print("Please enter a valid feature: mean, var, skewness, "
                  "kurtosis, maximum, minimum, RMS, std, range, iqr, "
                  "maxpeak, minpeak, all")

    # output
    if not result:
        print("No feature calculated!")
        return
    os.chdir(".\\")
    if sys.version_info >= (3, 0, 0):
        f = open("Result.csv", 'w', newline='')
    else:
        f = open("Result.csv", 'wb')
    w = csv.writer(f)
    namelist = []
    # create the column labels (loop variable renamed so it does not
    # shadow the open file handle `f`)
    for a, feat in calculist:
        if feat == 'all':
            for fl in featuresetlist[:-1]:
                namelist.append(a + "_" + fl)
        else:
            namelist.append(a + "_" + feat)
    out = dict()
    for i in range(0, numpy.size(namelist)):
        out[namelist[i]] = result[i]
    # write labels
    w.writerow(namelist)
    # write values
    w.writerow(result)
    return out
""" n = 2**12 n_window = int(len(zs) / 2) window = signal.blackmanharris(n_window) z_fft = np.fft.rfft(zs[0:n_window] * window, n=n) * 4 / n_window z_fft_mag = np.absolute(z_fft) f = np.fft.rfftfreq(n, d=dt) # peak detection # this stuff isn't working # peaks = peakdetect_parabola(z_fft_mag, f) # print(peaks) # indices = peakutils.indexes(z_fft_mag, thres=0.1, min_dist=1) # this one works maxtab, _ = peakdetect.peakdet(z_fft_mag, 0.01 * np.max(z_fft_mag), range(int(n / 2) + 1)) plt.figure() plt.plot(f, z_fft_mag) plt.scatter(f[np.array(maxtab)[:, 0].astype(int)], np.array(maxtab)[:, 1], color='blue') plt.show() print(f[np.array(maxtab)[0, 0].astype(int)]) fft_motion = SinusoidalMotion(acc_mag=np.array(maxtab)[1, 1], f=f[np.array(maxtab)[1, 0].astype(int)], dt=dt, noise_std=q, meas_noise_std=R) # initial conditions
# In [21]: len(d)
# Out[21]: 32
#
# In [22]: len(d[0])
# Out[22]: 131071

for fname in h5files:
    f = H.File(source_dir + runtag + "/" + fname, 'r')
    d = N.array(f['/data/data'])
    nasic = len(d)
    nhist = len(d[0])
    adu = N.arange(nhist) - 65535
    f.close()
    for asics in N.arange(nasic):
        # renamed from [max, min] to avoid shadowing the builtins
        maxtab, mintab = PD.peakdet(d[asics][65535 - 100:65535 + 2000], 20)
        xmax = [maxtab[i][0] - 100 for i in N.arange(len(maxtab))]
        ymax = [maxtab[i][1] for i in N.arange(len(maxtab))]
        xmin = [mintab[i][0] - 100 for i in N.arange(len(mintab))]
        ymin = [mintab[i][1] for i in N.arange(len(mintab))]
        #print(xmax, ymax, xmin, ymin)
        fig = P.figure()
        P.plot(adu, d[asics])
        P.scatter(xmax, ymax, c='g', marker='o')
        P.scatter(xmin, ymin, c='r', marker='o')
        canvas = fig.add_subplot(111)
        canvas.set_title(fname + "_asic%s" % (asics + 1))
        P.xlim(options.min_value, options.max_value)
        P.xlabel("Intensity (ADUs)")
        P.ylabel("Number of Pixels")
        #P.draw()
indir="D:/measuring/data/20160419" data = pd.read_csv(os.path.join(indir,"FSR_80nm_Q600_it10000.csv"), usecols=[2,4,5]) max_WL = data['Wavelength'].max() min_WL = data['Wavelength'].min() max_I = data['Intensity'].max() min_I = data['Intensity'].min() data_mean=data.groupby('Column').agg([np.mean]) # Peak detection method for higher order modes if peak_detect_H_order_modes == True: I_array = np.asarray(data_mean['Intensity']) # write intensity dataframe as array peak_1=peakdet(I_array, order_peak_detect_higher_modes) # use peakdet definition to detect all peaks from intensity peak_WL_1=np.transpose(np.asarray(data_mean.loc[peak_1[0][:,0],'Wavelength']))[0] #get corresponding wavelengths peak_I_1=peak_1[0][:,1] peak_freq_1 = 3.e8/(peak_WL_1*1.e-9) else: print 'There are no higher order modes in this plot.' # Peak detection method for TEM00 modes if peak_detect_TEM00 == True: I_avg_array = np.asarray(data_mean['Intensity']) # argrelextrema only takes an array indices = argrelextrema(I_avg_array, np.greater, order=order_peak_detection) # the number 100 is somewhat arbitrary, but seems to work. peak_WL= [] #connect indices with the values for the wavelength, creating arrays peak_I=[]
def peakdetection(self):
    cscore = ascore = gscore = tscore = spseries = pd.Series()
    markers = ['C', 'A', 'G', 'T']
    time, intensity = self.p1.roiCurve1.getData()
    noise = np.std(intensity[intensity < np.mean(intensity)])
    thresh = np.median(intensity) + 2 * noise
    firing = np.where(intensity > thresh)[0]
    startandend = np.diff(firing)
    startpoints = np.insert(startandend, 0, 2)
    endpoints = np.insert(startandend, -1, 2)
    startpoints = np.where(startpoints > 1)[0]
    endpoints = np.where(endpoints > 1)[0]
    startpoints = firing[startpoints]
    endpoints = firing[endpoints]
    peakseries = pd.Series()
    minseries = pd.Series()
    for i, x in enumerate(startpoints):
        sp = startpoints[i]
        ep = endpoints[i] + 1
        # self.p1.getRoiPlot().plot(x=np.arange(sp, ep), y=intensity[sp:ep], pen='b')
        try:
            peaks, mins = peakdet(v=intensity[sp:ep], delta=noise,
                                  x=np.arange(sp, ep))
            if len(peaks) == 0:
                peaks = np.NAN
            if len(mins) == 0:
                peaks = np.NAN
            # else:
            #     self.p1.getRoiPlot().plot(peaks, pen=None, symbol='o', symbolBrush='g')
            #     self.p1.getRoiPlot().plot(mins, pen=None, symbol='o', symbolBrush='r')
            peakseries = peakseries.append(pd.Series([peaks]))
            minseries = minseries.append(pd.Series([mins]))
            # p1.plot(df[i], pen=None, symbol='o', symbolBrush='b')
        except ValueError:
            pass
        if np.size(peaks) < 4:
            substack = np.mean(self.stack[sp:ep], 0)
            cscore = cscore.append(pd.Series(np.dot(substack.ravel(), self.czpro[0].ravel())), ignore_index=True)
            ascore = ascore.append(pd.Series(np.dot(substack.ravel(), self.czpro[1].ravel())), ignore_index=True)
            gscore = gscore.append(pd.Series(np.dot(substack.ravel(), self.czpro[2].ravel())), ignore_index=True)
            tscore = tscore.append(pd.Series(np.dot(substack.ravel(), self.czpro[3].ravel())), ignore_index=True)
            spseries = spseries.append(pd.Series(sp), ignore_index=True)
        else:
            for j, y in enumerate(peaks):
                if j == 0:
                    ssp = sp
                    sep = int(mins[j][0])
                elif j == len(peaks) - 1:
                    ssp = int(mins[j - 1][0])
                    sep = ep
                else:
                    ssp = int(mins[j - 1][0])
                    sep = int(mins[j][0])
                substack = np.mean(self.stack[ssp:sep + 1], 0)
                cscore = cscore.append(pd.Series(np.dot(substack.ravel(), self.czpro[0].ravel())), ignore_index=True)
                ascore = ascore.append(pd.Series(np.dot(substack.ravel(), self.czpro[1].ravel())), ignore_index=True)
                gscore = gscore.append(pd.Series(np.dot(substack.ravel(), self.czpro[2].ravel())), ignore_index=True)
                tscore = tscore.append(pd.Series(np.dot(substack.ravel(), self.czpro[3].ravel())), ignore_index=True)
                spseries = spseries.append(pd.Series(ssp), ignore_index=True)
    scoredf = pd.DataFrame({'C': cscore, 'A': ascore, 'G': gscore,
                            'T': tscore, 'sp': spseries})
    sequence = scoredf[['A', 'C', 'G', 'T']].idxmax(axis=1)
    print(sequence.str.cat())
    for i, x in enumerate(sequence):
        if x == 'C':
            color = 'r'
        if x == 'A':
            color = 'y'
        if x == 'G':
            color = 'g'
        if x == 'T':
            color = 'b'
        text = pg.TextItem(x, color=color)
        seqplot = self.p1.getRoiPlot().addItem(text)
        if i == 0:
            self.seqplotlist = [text]
        else:
            self.seqplotlist.append(text)
        text.setPos(spseries[i], intensity[spseries[i]])
    return sequence
def peakdetection(self, blueThresh, redThresh, bluePeakThresh, redPeakThresh):
    markers = ['C', 'A', 'G', 'T']
    colors = ['r', 'b']
    cscore = ascore = gscore = tscore = spseries = pd.Series()
    df = pd.DataFrame({"ident": [], 'stimes': [], 'etimes': [],
                       'peaks': [], 'mins': []})
    seqdf = pd.DataFrame({'base': [], 'times': []})
    time1, intensity1 = self.p1.roiCurve1.getData()
    time2, intensity2 = self.p1.roiCurve2.getData()
    intensities = [intensity1, intensity2]
    self.firingplotlist = []
    peakseries = pd.Series()
    minseries = pd.Series()
    for ind, intensity in enumerate(intensities):
        if ind == 0:
            thresh = redThresh
            noise = redPeakThresh
        if ind == 1:
            thresh = blueThresh
            noise = bluePeakThresh
        firing = np.where((intensity > thresh) &
                          (intensity > intensities[ind - 1]))[0]
        startandend = np.diff(firing)
        startpoints = np.insert(startandend, 0, 2)
        endpoints = np.insert(startandend, -1, 2)
        startpoints = np.where(startpoints > 1)[0]
        endpoints = np.where(endpoints > 1)[0]
        startpoints = firing[startpoints]
        endpoints = firing[endpoints]
        df = df.append(pd.DataFrame({"ident": [ind] * len(startpoints),
                                     'stimes': startpoints,
                                     'etimes': endpoints}),
                       ignore_index=True)
        for i, x in enumerate(startpoints):
            sp = startpoints[i]
            ep = endpoints[i] + 1
            curve = pg.PlotDataItem(x=np.linspace(sp, ep, ep - sp),
                                    y=intensity[sp:ep],
                                    pen=pg.mkPen(colors[ind], width=2))
            self.firingplotlist.append(curve)
            self.p1.getRoiPlot().addItem(curve)
            try:
                peaks, mins = peakdet(v=intensity[sp:ep], delta=noise,
                                      x=np.arange(sp, ep))
                if len(peaks) == 0 or len(mins) == 0:
                    peaks = np.NAN
                    substack = np.mean(self.stack[sp:ep], 0)
                    if ind == 0:
                        cscore = np.dot(substack.ravel(), self.czpro[0].ravel())
                        ascore = np.dot(substack.ravel(), self.czpro[1].ravel())
                        call = 'C' if cscore > ascore else 'A'
                    if ind == 1:
                        gscore = np.dot(substack.ravel(), self.czpro[2].ravel())
                        tscore = np.dot(substack.ravel(), self.czpro[3].ravel())
                        call = 'G' if gscore > tscore else 'T'
                    seqdf = seqdf.append(pd.DataFrame({'base': [call],
                                                       'times': [sp]}),
                                         ignore_index=True)
                else:
                    # point = pg.PlotDataItem(peaks, pen=None, symbol='o', symbolBrush='g')
                    # self.p1.getRoiPlot().addItem(point)
                    # self.firingplotlist.append(point)
                    # point = pg.PlotDataItem(mins, pen=None, symbol='o', symbolBrush='r')
                    # self.p1.getRoiPlot().addItem(point)
                    # self.firingplotlist.append(point)
                    # (loop variables renamed so they do not shadow the
                    # enclosing i, x)
                    for j, y in enumerate(peaks):
                        if j == 0:
                            ssp = sp
                            sep = int(mins[j][0])
                        elif j == len(peaks) - 1:
                            ssp = int(mins[j - 1][0])
                            sep = ep
                        else:
                            ssp = int(mins[j - 1][0])
                            sep = int(mins[j][0])
                        substack = np.mean(self.stack[ssp:sep + 1], 0)
                        if ind == 0:
                            cscore = np.dot(substack.ravel(), self.czpro[0].ravel())
                            ascore = np.dot(substack.ravel(), self.czpro[1].ravel())
                            call = 'C' if cscore > ascore else 'A'
                        if ind == 1:
                            gscore = np.dot(substack.ravel(), self.czpro[2].ravel())
                            tscore = np.dot(substack.ravel(), self.czpro[3].ravel())
                            call = 'G' if gscore > tscore else 'T'
                        seqdf = seqdf.append(pd.DataFrame({'base': [call],
                                                           'times': [ssp]}),
                                             ignore_index=True)
                    peakseries = peakseries.append(pd.Series([peaks]))
                    minseries = minseries.append(pd.Series([mins]))
            except ValueError:
                pass
    # DataFrame.sort was removed in newer pandas; sort_values is equivalent
    seqdf = seqdf.sort_values(by=['times', 'base'])
    for i, x in enumerate(seqdf.index):
        base = seqdf.base[i]
        if base == 'C':
            color = 'r'
            intensity = intensities[0][int(seqdf.times[i])]
        if base == 'A':
            color = 'y'
            intensity = intensities[0][int(seqdf.times[i])]
        if base == 'G':
            color = 'g'
            intensity = intensities[1][int(seqdf.times[i])]
        if base == 'T':
            color = 'b'
            intensity = intensities[1][int(seqdf.times[i])]
        text = pg.TextItem(base, color=color)
        seqplot = self.p1.getRoiPlot().addItem(text)
        if i == 0:
            self.seqplotlist = [text]
        else:
            self.seqplotlist.append(text)
        text.setPos(seqdf.times[i], intensity)
    print(seqdf.base.str.cat())
    return seqdf
def FE(begin, end,
       userfilename="E:\\LED_Data\\LifeCycleList.csv",
       filedir="E:\\LED_Data\\18.non_nominal_0912\\current\\",
       featurefile="E:\\pythonanalysis\\feature.txt"):
    # read input from the 3 files
    userfileList = pd.read_csv(userfilename)
    featureList = [line.strip('\n') for line in open(featurefile)]

    # read the current files, filtered by time period
    os.chdir(filedir)
    currentfilenameList = glob.glob("*.csv")
    currentfilelist = list()
    cfilteredfilenameList = list()
    for filename in currentfilenameList:
        for filenum in range(begin, end + 1):
            if (int(filename[3:7]) >= userfileList.iloc[filenum, 1]) & \
               (int(filename[3:7]) <= userfileList.iloc[filenum, 2]):
                temp = pd.read_csv(filename)
                if numpy.size(temp, 0) >= 900:
                    currentfilelist.append(temp)
                    cfilteredfilenameList.append(filename)

    def maxpeak(col):
        maxtab, mintab = peakdetect.peakdet(col, .3)
        return maxtab[:, 1].max()

    def minpeak(col):
        maxtab, mintab = peakdetect.peakdet(col, .3)
        return mintab[:, 1].min()

    # one extractor per feature name; NaN on any failure, matching the
    # original try/except blocks
    extractors = {
        'maxpeak': maxpeak,
        'minpeak': minpeak,
        'mean': lambda col: col.mean(),
        'variance': lambda col: col.var(),
        'skewness': lambda col: col.skew(),
        'kurtosis': lambda col: col.kurt(),
        'max': lambda col: col.max(),
        'min': lambda col: col.min(),
        'RMS': lambda col: numpy.sqrt(numpy.mean(numpy.square(col))),
        'std': lambda col: col.std(),
        'range': lambda col: col.max() - col.min(),
        'iqr': lambda col: col.quantile(0.75) - col.quantile(0.25),
    }

    totalresult = list()
    result = list()
    # calculate features
    index = 0
    for file in currentfilelist:
        index = index + 1
        result.append(index)
        for i, col in file.items():
            if i != "DataTime":
                for fea in featureList:
                    if fea in extractors:
                        try:
                            result.append(extractors[fea](col))
                        except Exception:
                            result.append(float('nan'))
                    else:
                        print("Please enter a valid feature: mean, variance, "
                              "skewness, kurtosis, max, min, RMS, std, "
                              "range, iqr, maxpeak, minpeak")
        if result:
            totalresult.append(result[:])
            result.clear()

    # output
    if not totalresult:
        print("No feature calculated!")
        return
    f = open("totalResult.csv", 'w', newline='')
    w = csv.writer(f)
    namelist = []
    # create the column labels
    namelist.append("Filenum")
    for name in currentfilelist[1].columns.values.tolist():
        if name != "DataTime":
            for fea in featureList:
                namelist.append(name + "_" + fea)
    # write labels
    w.writerow(namelist)
    # write values
    for r in totalresult:
        w.writerow(r)
    f.close()