def getPeeksandBottoms(indexData, lookahead, delta, stocks):
    positive, negative = pDetect.peakdetect(indexData['Trade High'],
                                            x_axis=indexData.index.values,
                                            lookahead=lookahead, delta=delta)
    x, y = zip(*positive)
    positive, negative = pDetect.peakdetect(indexData['Trade Low'],
                                            x_axis=indexData.index.values,
                                            lookahead=lookahead, delta=delta)
    xmin, ymin = zip(*negative)
    # Collect peaks from the index data in a new data frame, indexPeeks.
    indexPeeks = indexData[indexData.index.isin(x)]
    indexPeeks['value'] = 1
    indexPeeks['real'] = indexPeeks.index
    # Collect bottoms from the index data in a new data frame, indexBottoms.
    indexBottoms = indexData[indexData.index.isin(xmin)]
    indexBottoms['value'] = -1
    indexBottoms['real'] = indexBottoms.index
    stocksPeeks = HighAndLow(indexPeeks, indexBottoms, indexData, stocks)
    # Merge indexPeeks and indexBottoms into the periodsIndex DataFrame.
    periodsIndex = pd.merge(indexPeeks, indexBottoms, how='outer', sort=True)
    periodsIndex = periodsIndex.reset_index(drop=True)
    return [periodsIndex, stocksPeeks]
def get_strain(seqs, look_ahead):
    seq_list = list(seqs)
    dt_seq_list = np.diff(seq_list)
    baseline_seq = np.mean(seq_list)
    max_strain, min_strain = peakdetect(seq_list, lookahead=look_ahead)
    max_dt_strain, min_dt_strain = peakdetect(dt_seq_list, lookahead=look_ahead)
    xm_max_strain = [p[0] for p in max_strain]
    ym_max_strain = [p[1] for p in max_strain]
    xm_max_dt_strain = [p[0] for p in max_dt_strain]
    ym_max_dt_strain = [p[1] for p in max_dt_strain]
    xm_min_strain = [p[0] for p in min_strain]
    ym_min_strain = [p[1] for p in min_strain]
    xm_min_dt_strain = [p[0] for p in min_dt_strain]
    ym_min_dt_strain = [p[1] for p in min_dt_strain]
    strain_coordinates = []
    final_strain = 0
    if baseline_seq > 0:
        xm_max_strain = xm_max_strain[0]
        ym_max_strain = ym_max_strain[0]
        x_final = y_final = 0
        if len(xm_min_dt_strain) > 1:
            index = min(range(len(xm_min_dt_strain)),
                        key=lambda i: abs(xm_min_dt_strain[i] - xm_max_strain))
            x_final = xm_min_dt_strain[index]
            y_final = ym_min_dt_strain[index]
        else:
            # Take the single detected extremum rather than the whole list.
            x_final = xm_min_dt_strain[0]
            y_final = ym_min_dt_strain[0]
        strain_coordinates = [[xm_max_strain, ym_max_strain], [x_final, y_final]]
        final_strain = y_final
    else:
        xm_min_strain = xm_min_strain[0]
        ym_min_strain = ym_min_strain[0]
        x_final = y_final = 0
        if len(xm_max_dt_strain) > 1:
            index = min(range(len(xm_max_dt_strain)),
                        key=lambda i: abs(xm_max_dt_strain[i] - xm_min_strain))
            x_final = xm_max_dt_strain[index]
            y_final = ym_max_dt_strain[index]
        else:
            x_final = xm_max_dt_strain[0]
            y_final = ym_max_dt_strain[0]
        strain_coordinates = [[xm_min_strain, ym_min_strain], [x_final, y_final]]
        final_strain = y_final
    return final_strain, seq_list, dt_seq_list, strain_coordinates
def resultByPeak(partial_df):
    """Classification using the distance between peaks."""
    d = partial_df['period'].iloc[0] / (10.0 * 2)
    if d == 50:
        classifier.setLowPassfilter(2, 0.17)
        d_thre = 20
        count_thre = 2
        look_a = 20
    elif d == 100:
        classifier.setLowPassfilter(2, 0.03)
        d_thre = 40
        count_thre = 1
        look_a = 40
    elif d == 150:
        classifier.setLowPassfilter(2, 0.01)
        d_thre = 60
        count_thre = 1
        look_a = 60
    rec_h = classifier.LowPassFilter(partial_df['EOG_H'].as_matrix())
    rec_v = classifier.LowPassFilter(partial_df['EOG_V'].as_matrix())
    peaks_h = peakdetect(rec_h, lookahead=look_a)
    peaks_v = peakdetect(rec_v, lookahead=look_a)
    count = 0
    d_th = [values[0] for plu_min in peaks_h for values in plu_min]
    d_th.sort()
    if d_th is not None:
        for i in range(1, len(d_th) - 1):
            if np.abs((d_th[i] - d_th[i - 1]) - d) < d_thre:
                if np.abs((d_th[i + 1] - d_th[i]) - d) < d_thre:
                    count += 1
    d_tv = [values[0] for plu_min in peaks_v for values in plu_min]
    d_tv.sort()
    if d_tv is not None:
        for i in range(1, len(d_tv) - 1):
            if np.abs((d_tv[i] - d_tv[i - 1]) - d) < d_thre:
                if np.abs((d_tv[i + 1] - d_tv[i]) - d) < d_thre:
                    count += 1
    if count >= count_thre:
        return 1, count
    else:
        return 0, count
def get_times(self):
    self.standard_times = {}
    for _run in self.control:
        if _run.type == 'control':
            peaks = peakdetect.peakdetect(_run.intensity, _run.time)
            self.standard_times['control'] = float(peaks[0][1][0])
        elif _run.type == concentration_to_find_time:
            peaks = peakdetect.peakdetect(_run.intensity, _run.time)[0]
            for peak in peaks:
                if peak[0] > min_elution_time:
                    self.standard_times[_run.name] = float(peak[0])
                    break
def call_TADs(cool, name, func=insul_score, valleys=False, il=20, la=5,
              d=0.25, color='0,0,0'):
    results = []
    for chrom in list(range(1, 20)) + ['X']:
        print(chrom)
        data = cool.matrix(sparse=True).fetch('chr' + str(chrom)).tocsr()
        S = func(data, il)  # use the scoring function passed in
        S /= np.nanmean(S)
        peaks = peakdetect(S, lookahead=la, delta=d)[int(valleys)]
        peaks = np.array(peaks).T[0].astype(int)
        tads = pd.DataFrame(np.array(list(zip(peaks[:-1], peaks[1:]))),
                            columns=['x1', 'x2']) * cool.binsize
        tads['chr1'] = chrom
        tads_annot = pd.concat([
            tads,
            tads.rename(columns={'chr1': 'chr2', 'x1': 'y1', 'x2': 'y2'})
        ], axis=1)['chr1 x1 x2 chr2 y1 y2'.split()]
        results.append(tads_annot)
    TADs_annot = pd.concat(results)
    TADs_annot['color'] = color
    TADs_annot['comment'] = '%s insul_score %s %s %s' % (name, il, la, d)
    return TADs_annot
def run(__FILE=_FILE, __RATE=_RATE, __CROP=_CROP, __FREQ_TUNE=_FREQ_TUNE):
    global _FILE
    global _RATE
    global _CROP
    global _FREQ_TUNE
    _FILE = __FILE
    _RATE = __RATE
    _CROP = __CROP
    _FREQ_TUNE = __FREQ_TUNE
    print "-- Opening wav file."
    wav = wave.open(_FILE, 'r')
    print "-- File opened, compartmentalizing channels."
    full = np.fromstring(wav.readframes(_RATE), dtype=np.int16)
    left = full[0::2]
    right = full[1::2]
    print "-- Taking the FFT."
    l, r = np.fft.rfft(left), np.fft.rfft(right)
    l[:_CROP[0]] = 0
    r[:_CROP[0]] = 0
    print "-- Outputting tuning file. (This takes a while.)"
    print "-- Closest freq to " + str(_FREQ_TUNE) + "Hz found at " +\
        str(find_diff_to_closest_freq_and_make_file(
            _FILE + "-tune.wav",
            peakdetect.peakdetect(l, delta=10000)[0][0][0]))
    wav.close()
    print "-- Done."
def find_gpeaks(ns, locdir, binmax=258):
    """find_gpeaks(ns, locdir, binmax) finds the peaks and valleys in a g(r)
    curve.

    takes:
        ns      list of densities to analyse
        locdir  local directory for data
        binmax  the max bin number (hopefully a temporary workaround)

    returns:
        peaks   dict of [list of peaks, list of valleys]
                in the format given by peakdetect.py
    """
    import peakdetect as pk
    #ns = np.array([8,16,32,64,128,192,256,320,336,352,368,384,400,416,432,448])
    binmax = 258
    gdata = get_gdata(locdir, ns)
    peaks = {}
    maxima = {}
    minima = {}
    for k in gdata:
        extrema = pk.peakdetect(
            gdata[k]['g'][:binmax] / 22.0, gdata[k]['rg'][:binmax] / 22.,
            lookahead=2, delta=.0001)
        peaks[k] = extrema
        maxima[k] = np.asarray(extrema[0])
        minima[k] = np.asarray(extrema[1])
    return peaks
def butterfly(self, residuals, noise, Time, TimeError, TTimes):
    peaks = peakdetect(residuals, Time, 1, noise)[0]
    if len(peaks) == 0:
        raise RuntimeError("No peaks found")
    foundSpots = []
    for peak in peaks:
        t = peak[0]
        h = peak[1]
        for interval in TTimes:
            if checkInBetween(t, interval):
                X, Y = self.planet.skyPosAtTime(t)
                X_min, Y_min = self.planet.skyPosAtTime(t - TimeError)
                X_max, Y_max = self.planet.skyPosAtTime(t + TimeError)
                fs = MultipurposeSpot((X, Y), ((X_min, Y_min), (X_max, Y_max)),
                                      h, t, self.star)
                foundSpots.append(fs)
    realSpots = []
    for t in Time:
        for obj in settings.GLOBAL_ALL_SPOTS:
            # Grab spot and creation/death times
            realSpot = obj[0]
            creation = obj[1]
            death = obj[2]
            # Check spot didn't die
            if checkInBetween(t, [creation, death]):
                # Correct for rotation star centre long
                timeCorrectedLong = 360 * ((t / self.star.period) % 1)
                # Visible region bounds
                bound_left = 270
                bound_right = 90
                # Correct spot and shift based on moving star long
                spotCorrectedLong = (realSpot.lon - timeCorrectedLong) % 360
                timeCorrectedLat = realSpot.lat + (t - creation) * realSpot.lat_vel
                # Check if spot was visible at that time
                if spotCorrectedLong > bound_left or spotCorrectedLong < bound_right:
                    rs = MultipurposeSpot(None, None, realSpot.brightness, t,
                                          None, (None, timeCorrectedLat))
                    realSpots.append(rs)
    matchedSpots = []
    for fs in foundSpots:
        distance = 1000
        idx = np.nan
        for i, rs in enumerate(realSpots):
            val = np.sqrt(np.abs(fs.timeFound - rs.timeFound)**2 +
                          np.abs(fs.coords[1] - rs.coords[1])**2)
            if val < distance:
                distance = val
                idx = i
        matchedSpots.append(realSpots[idx])
    return foundSpots, realSpots, matchedSpots
def plot_fi_curves():
    fig_fi = plt.figure(figsize=(3, 5))
    ax_fi = None
    for cellno, celltype in enumerate(util.CELLTYPES):
        filename = os.path.join(DIRECTORY, FILES[celltype])
        with h5.File(filename, 'r') as fhandle:
            groups = [key for key in fhandle.keys() if key.startswith(celltype)]
            cnt = len(groups)
            current_list = []
            freq_list = []
            ax_fi = fig_fi.add_subplot(len(util.CELLTYPES), 1, cellno + 1,
                                       sharey=ax_fi, sharex=ax_fi)
            for testno, test_name in enumerate(groups):
                test_group = fhandle[test_name]
                current = test_group.attrs['current']
                if current == 0.0:
                    continue
                current_list.append(current)
                dt = test_group['Vm'].attrs['dt']
                vm = np.asarray(test_group['Vm'])
                times = np.arange(0, len(vm), 1.0) * dt
                peaks, troughs = peakdetect(vm, times, lookahead=3)
                peaks = np.asarray(peaks, dtype=np.float64)
                spike_times = peaks[peaks[:, 1] > SPIKE_THRESHOLD, 0].copy()
                stim = np.asarray(test_group['stimulus'])
                if current > 0:
                    tstart = np.where(np.diff(stim) > 0)[0][0]
                    tend = np.where(np.diff(stim) < 0)[0][0]
                else:
                    tstart = np.where(np.diff(stim) < 0)[0][0]
                    tend = np.where(np.diff(stim) > 0)[0][0]
                tstart = tstart * dt
                tend = tend * dt
                positive_spike = spike_times[(spike_times > tstart) &
                                             (spike_times < tend)].copy()
                freq = len(positive_spike) / (tend - tstart)
                freq_list.append(freq)
            current_list = np.asarray(current_list)
            freq_list = np.asarray(freq_list)
            # rebound_freq_list = np.asarray(rebound_freq_list)
            sort_idx = np.argsort(current_list)
            current_list = current_list[sort_idx].copy()
            freq_list = freq_list[sort_idx].copy()
            ax_fi.plot(current_list[current_list > 0] * 1e9,
                       freq_list[current_list > 0],
                       color=config.cellcolor[celltype], marker='^')
            ax_fi.tick_params(axis='y', right=False)
            ax_fi.tick_params(axis='x', top=False)
            ax_fi.xaxis.set_visible(False)
            ax_fi.set_yticks([0, 300, 600])
            ax_fi.set_ylim([0, 600])
            ax_fi.spines['top'].set_visible(False)
            ax_fi.spines['right'].set_visible(False)
    # rebound_axes.plot(current_list[sort_idx], rebound_freq_list[sort_idx])
    ax_fi.xaxis.set_visible(True)
    ax_fi.set_xlabel('Current (nA)')
    ax_fi.set_ylabel('spikes/s')
    fig_fi.subplots_adjust(bottom=0.1, top=0.95, left=0.3, right=0.95)
    fig_fi.savefig('figures/Figure_1_EFG_curves.png', transparent=True)
    plt.show()
def getLowPeaks(self, showplot=True, activity="", lowSpeedLimit=5):
    x = range(len(self.speedData))
    y = self.speedData
    highpeaks, lowpeaks = peakdetect(y, x, 10)
    xm = [p[0] for p in highpeaks]
    ym = [p[1] for p in highpeaks]
    xn = list()
    yn = list()
    for p in lowpeaks:
        if p[1] < lowSpeedLimit:
            xn.append(p[0])
            yn.append(p[1])
    if showplot:
        plot = pylab.plot(x, y)
        pylab.hold(True)
        pylab.plot(xm, ym, 'go')
        pylab.plot(xn, yn, 'ro')
        pylab.title('peak (stop) detection for ' + str(activity) + ' speed over time')
        pylab.xlabel("time")
        pylab.ylabel("speed")
        pylab.show()
    return lowpeaks
def getpeaks(path: str, one_hot=False):
    rate, wav = wf.read(path)
    data = np.array(wav)
    degg = np.insert(np.diff(data), 0, 0)  # preserve the number of inputs
    out = np.array(peakdetect(degg, lookahead=5))
    print(np.array(out[1]).shape)
    out = np.array(out[1])[:, 0].astype(int)  # integer indices of the minima
    # Soft threshold: a sixth of the mean of the 100 largest |dEGG| values.
    s = pd.Series(np.abs(degg))
    thresh = -1 / 6 * s.nlargest(100).mean()
    # Apply threshold
    dec = degg[out] <= thresh
    fin = out[np.nonzero(1 * dec)]
    diff_fin = np.diff(fin)
    threshold = 50
    thresholded_diff = (diff_fin >= threshold) * 1.0
    final_diff = np.insert(thresholded_diff, len(thresholded_diff) - 1, 1) * fin
    fin = final_diff.astype(int)
    if one_hot:
        gt = np.zeros(len(degg))
        print("ONE HOT")
        gt[fin] = 1
        return fin, gt, degg
    return fin, degg[fin], degg
def breathingrate(Data):
    timestep = 0.083  # 0.083 s per sample for the Walabot
    n = np.size(Data)
    Time = np.arange(0, n * timestep, timestep)
    # Breathing rarely goes above 1 breath / 2 seconds, so set lookahead to
    # half of that, i.e. 1 peak / second.
    peaks = peakdetect(Data, Time, lookahead=10)
    maxpeaks = np.array(peaks)[0]
    minpeaks = np.array(peaks)[1]
    maxX = [0 for k in range(len(maxpeaks))]
    maxY = [0 for k in range(len(maxpeaks))]
    for i in range(0, np.size(maxpeaks, 0)):
        peakP = maxpeaks[i]
        maxX[i] = peakP[0]
        maxY[i] = peakP[1]
    minX = [0 for k in range(len(minpeaks))]
    minY = [0 for k in range(len(minpeaks))]
    for i in range(0, np.size(minpeaks, 0)):
        peakP = minpeaks[i]
        minX[i] = peakP[0]
        minY[i] = peakP[1]
    # Turn peaks into breaths/minute. len() is used instead of np.size() so
    # each (x, y) peak pair counts once, not twice.
    bpm = ((len(maxX) + len(minX)) / 2) / ((Time[-1] - Time[0]) / 60)
    print(bpm)
    #plt.gcf().clear()
    #plt.plot(Time, Data, maxX, maxY, 'bo', minX, minY, 'ro')
    #plt.show()
    return bpm, maxX, maxY, minX, minY
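# --- Hedged usage sketch for breathingrate (not from the original source).
# Assumes breathingrate and peakdetect are importable; a 0.25 Hz sine stands
# in for chest displacement at 15 breaths/minute at the Walabot's ~12 Hz
# sample rate. Note that np.array(peaks) inside breathingrate needs matching
# max/min peak counts (or an older NumPy) to avoid a ragged-array error.
import numpy as np

t = np.arange(0, 60, 0.083)                 # one minute of samples
chest = np.sin(2 * np.pi * 0.25 * t)        # 15 breaths per minute
bpm, maxX, maxY, minX, minY = breathingrate(chest)  # bpm should be near 15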
def find_cycles(self, win_len=10, delta=1, lookahead=1, include_holds=True,
                **kwargs):
    """Locate peaks and troughs in the signal."""
    resp_scaled = self._move_zscore(win_len * self.samp_freq)
    peaks, troughs = peakdetect(resp_scaled, delta=delta, lookahead=lookahead)
    # Make sure we start with an inhalation and end with an exhalation.
    if peaks[0] < troughs[0]:
        peaks = peaks[1:]
    if peaks[-1] > troughs[-1]:
        peaks = peaks[:-1]
    assert len(peaks) == len(troughs) - 1, \
        'Expected {} peaks, got {}'.format(len(troughs) - 1, len(peaks))
    # Store the results in an IntervalTier.
    inhalations = zip(troughs[:-1], peaks)
    exhalations = zip(peaks, troughs[1:])
    segments = tgt.IntervalTier(name='resp')
    for inh, exh in zip(inhalations, exhalations):
        inh_onset = inh[0] / self.samp_freq
        inh_offset = inh[1] / self.samp_freq
        exh_offset = exh[1] / self.samp_freq
        segments.add_interval(tgt.Interval(inh_onset, inh_offset, 'in'))
        segments.add_interval(tgt.Interval(inh_offset, exh_offset, 'out'))
    self.segments = segments
    if include_holds:
        # Pass kwargs to find_holds.
        self.find_holds(**kwargs)
def main():
    dev = hp_3582a.hp_3582a()
    dev.preset()                                     # preset the device
    time.sleep(0.5)                                  # delay of 0.5 secs
    dev.set_span(8)                                  # span now set to 250 Hz
    time.sleep(0.5)                                  # delay of 0.5 secs
    dev.autoset_sensitivity()                        # auto set the channel sensitivity
    time.sleep(0.5)                                  # delay of 0.5 secs
    dev.set_averaging(mode="RMS", no_of_samples=4)   # set averaging
    time.sleep(5)                                    # delay of 5 secs
    data = dev.get_spectrum()                        # get the spectrum data to find peaks
    ##!!!!!! WARNING: The peak detection algorithm was not written by me; !!!##
    ##!!!!!! make sure it is working well before using it.                !!!##
    peak_data = peakdetect.peakdetect(data, lookahead=2)  # peak detection
    max_peaks = peak_data[0]         # consider only the max peaks, discard mins
    dev.set_scale("linear")          # change scale from 10 dB/div to linear
    for peaks in max_peaks:
        peaks.append(dev.freq_calc(peaks[0]))  # calculate freq corresponding to index
        dev.set_marker(peaks[2])               # set marker at that peak
        time.sleep(3)                          # delay of 3 secs
        mark_val = dev.get_marker_data()[0]    # get marker value in volts
        peaks.append(mark_val)                 # append marker value to the list
        time.sleep(0.75)                       # delay of 0.75 secs
        print peaks                            # print the peak info after reading each peak
    # preparation for the consolidated report
    table = prettytable.PrettyTable(["Index", "in dB", "Freq", "in V"])
    for peaks in max_peaks:
        table.add_row(peaks)
    print table
    return
def bio_signal_peak_detect(sig, fs, sigtype='resp'):
    signaltype = {'resp': fs / 2 * 4, 'ecg': fs / 2 * 2, 'bp': fs / 2}
    max_peaks, min_peaks = peakdetect(sig, lookahead=int(signaltype[sigtype]))
    max_peaks_idx, max_peaks_val = zip(*max_peaks)
    min_peaks_idx, min_peaks_val = zip(*min_peaks)
    return max_peaks_idx, max_peaks_val, min_peaks_idx, min_peaks_val
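# --- Hedged usage sketch for bio_signal_peak_detect (not from the original
# source). Assumes the function and the sixtenbe-style peakdetect are in
# scope. For sigtype='resp' the lookahead works out to fs/2*4 = 200 samples,
# so the test trace keeps its extrema well over 200 samples apart;
# zip(*max_peaks) would raise if no peaks were found at all.
import numpy as np

fs = 100
t = np.arange(0, 30, 1.0 / fs)
resp = np.sin(2 * np.pi * 0.3 * t)          # ~18 breaths/minute
mx_i, mx_v, mn_i, mn_v = bio_signal_peak_detect(resp, fs, sigtype='resp')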
def get_MBS_GCI_intervals(MBS, fs, T0mean, F0max=500):
    F0max = F0max * 2
    T0max = int(fs / F0max)
    max_peaks, min_peaks = peakdetect(MBS, lookahead=T0max)
    idx = np.asarray([min_peaks[j][0] for j in range(len(min_peaks))])
    N = len(idx)
    search_rate = 0.28
    search_left_rate = 0.01
    interval = np.zeros((N, 2))
    for n in range(N):
        if len(T0mean) > 1:
            start = idx[n] - int(T0mean[idx[n]] * search_left_rate)
            stop = idx[n] + int(T0mean[idx[n]] * search_rate)
        else:
            start = idx[n] - int(T0mean * search_left_rate)
            stop = idx[n] + int(T0mean * search_rate)
        if start < 1:
            start = 1
        if stop > len(MBS) and start < len(MBS):
            stop = len(MBS)
        elif stop > len(MBS) and start >= len(MBS):
            break
        interval[n, 0] = start
        interval[n, 1] = stop
    return interval
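# --- Hedged usage sketch for get_MBS_GCI_intervals (not from the original
# source; all values are illustrative). A 120 Hz sinusoid stands in for the
# mean-based signal. T0mean is passed as a one-element array because the
# else-branch calls len() and then int() on it; int() of a one-element array
# works but is deprecated in recent NumPy.
import numpy as np

fs = 16000
t = np.arange(0, 0.1, 1.0 / fs)
MBS = np.sin(2 * np.pi * 120 * t)            # 120 Hz voicing proxy
T0mean = np.array([fs // 120])               # ~133 samples per pitch period
intervals = get_MBS_GCI_intervals(MBS, fs, T0mean)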
def detect_peaks(self, max_n_peaks=6):
    """Detect maxima in the given data.

    max_n_peaks (int): maximum number of peaks that should be returned
    """
    _spectrum = self.data['spectrum']
    y = _spectrum[:, 1]
    x = _spectrum[:, 0]
    # Look for maxima.
    max_peaks, min_peaks = peakdetect.peakdetect(y, x, lookahead=5, delta=0)
    peaks = []
    threshold = 1000
    for peak in max_peaks:
        if peak[1] > threshold:
            peaks.append(peak)
    # Raise the threshold until at most max_n_peaks remain.
    while len(peaks) > max_n_peaks:
        threshold += 1
        peaks = []
        for peak in max_peaks:
            if peak[1] > threshold:
                peaks.append(peak)
    return peaks
def getpeaks(data, one_hot=False):
    degg = np.insert(np.diff(data), 0, 0)  # preserve the number of inputs
    out = np.array(peakdetect(degg, lookahead=5))
    out = np.array(out[1])[:, 0]
    out = out.astype(int)
    abs_degg = np.abs(degg)
    largest_ind = np.argpartition(abs_degg, -100)[-100:]
    thresh = -1 / 6 * np.mean(abs_degg[largest_ind])
    # Apply threshold
    dec = degg[out] <= thresh
    fin = out[np.nonzero(1 * dec)]
    diff_fin = np.diff(fin)
    threshold = 50
    thresholded_diff = (diff_fin >= threshold) * 1.0
    final_diff = np.insert(thresholded_diff, len(thresholded_diff) - 1, 1) * fin
    fin = final_diff.astype(int)
    if one_hot:
        ground_truth = np.zeros(len(degg))
        ground_truth[fin] = 1
        return fin, ground_truth, degg
    return fin, degg[fin], degg
def data(liste):
    peaks = peakdetect(liste[1])
    positive = peaks[0]
    negative = peaks[1]
    pos_positions = []
    neg_positions = []
    for i in positive:
        pos_positions.append(i[0])
    for i in negative:
        neg_positions.append(i[0])
    t_pos_c = []
    t_neg_c = []
    for i in pos_positions:
        t_pos_c.append(liste[0][i])
    for i in neg_positions:
        t_neg_c.append(liste[0][i])
    difs = []
    for i in range(1, len(t_pos_c)):
        dif = t_pos_c[i] - t_pos_c[i - 1]
        difs.append(dif)
    for i in range(1, len(t_neg_c)):
        dif = t_neg_c[i] - t_neg_c[i - 1]
        difs.append(dif)
    average = es.mean(difs)
    stdev = es.stdev(difs)
    return [average, stdev]
def find_peak(data, cm, search, lenx):
    localmax, localmin = peakdetect(data[cm - search:cm + search],
                                    x_axis=range(0, lenx)[cm - search:cm + search],
                                    lookahead=search // 4)
    localmax = sorted(localmax, key=lambda x: x[1])[-1]
    return localmax
def totPeakNum(sig, look_a=4, delta=500):
    """Out: count of both positive and negative peaks."""
    res = peakdetect.peakdetect(y_axis=sig, lookahead=look_a, delta=delta)
    num_plus = len(res[0])
    num_minus = len(res[1])
    return num_plus + num_minus
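# --- Hedged sanity check for totPeakNum (not from the original source).
# The default delta=500 implies raw ADC-scale units, so the test sine is
# scaled to +/-1000 counts; three full cycles should yield about 6 peaks.
import numpy as np

sig = 1000 * np.sin(np.linspace(0, 6 * np.pi, 300))
print(totPeakNum(sig))   # expect about 6 (3 maxima + 3 minima)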
def ppg_sistolic_peaks(ppg_signal):
    peaks = peakdetect(ppg_signal, lookahead=5)
    peaks_max = []
    peaks_max_ind = []
    for i in peaks[0]:
        peaks_max_ind.append(i[0])
        peaks_max.append(i[1])
    return peaks_max_ind, peaks_max
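# --- Hedged usage sketch for ppg_sistolic_peaks (not from the original
# source). Assumes ppg_signal is an already-filtered PPG trace; a raised
# sine at ~1.2 Hz (about 72 beats/minute) stands in for the pulse waveform.
import numpy as np

t = np.linspace(0, 10, 1000)                 # 10 s at 100 Hz
ppg = 1 + np.sin(2 * np.pi * 1.2 * t)
idx, vals = ppg_sistolic_peaks(ppg)          # ~12 systolic peaks expected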
def sidePeakNum(sig, look_a=4, delta=500):
    """Return the plus/minus peak counts."""
    res = peakdetect.peakdetect(y_axis=sig, lookahead=look_a, delta=delta)
    num_plus = len(res[0])
    num_minus = len(res[1])
    return num_plus, num_minus
def ppg_min_peaks(ppg_signal):
    peaks = peakdetect(ppg_signal, lookahead=5)
    peaks_min = []
    peaks_min_ind = []
    for i in peaks[1]:
        peaks_min_ind.append(i[0])
        peaks_min.append(i[1])
    return peaks_min_ind, peaks_min
def data_removal_trial(array, remove_threshold):
    array = np.array(array)
    r, c = array.shape
    array_new = []
    for idx in range(c):
        temp = np.array(array[:, idx])
        max_val, min_val = peakdetect(temp, lookahead=31)
        max_data = change_data(max_val)
        min_data = change_data(min_val)
        idx_max = []
        idx_min = []
        r, k = array.shape
        if max_data.size > 0:
            max_pos = max_data[:, 0].astype(int)
            max_value = max(max_data[:, 1])
            max_average = sum(max_data[:, 1]) / len(max_data[:, 1])
            for idx1 in range(r):
                if array[idx1, idx] >= remove_threshold * max_average:
                    idx_max.append(idx1)
        if min_data.size > 0:
            min_value = min(min_data[:, 1])
            min_pos = min_data[:, 0].astype(int)
            min_average = sum(min_data[:, 1]) / len(min_data[:, 1])
            for idx2 in range(r):
                if array[idx2, idx] <= remove_threshold * min_average:
                    idx_min.append(idx2)
        if len(idx_max) > 0:
            # Rebuild the array with the rows above threshold removed.
            for i in range(c):
                if i == 0:
                    row = len(np.delete(array[:, i].copy(), idx_max))
                    temp_array = np.empty((row, c), dtype=float)
                temp_array[:, i] = np.delete(array[:, i], idx_max)
            array = temp_array
        if len(idx_min) > 0:
            # Same for the rows below the negative threshold.
            for i in range(c):
                if i == 0:
                    row = len(np.delete(array[:, i].copy(), idx_min))
                    temp_array1 = np.empty((row, c), dtype=float)
                temp_array1[:, i] = np.delete(array[:, i], idx_min)
            array = temp_array1
    return array
def find_lines(y, x_axis=None, lookahead=50):
    localmax, localmin = peakdetect(y, x_axis=x_axis, lookahead=lookahead // 4)
    xmin, minval = zip(*localmin)
    xmax, maxval = zip(*localmax)
    # Concatenate the two lists of extrema.
    x = np.concatenate((xmin, xmax))
    y = np.concatenate((minval, maxval))
    return x, y
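# --- Hedged usage sketch for find_lines (not from the original source).
# Assumes the sixtenbe peakdetect signature used throughout this file; the
# effective lookahead is lookahead // 4, so it is kept comfortably above 4.
# find_lines passes delta=0 implicitly, so a few spurious extrema may appear
# on noisy data; counts are therefore only approximate.
import numpy as np

x_axis = np.linspace(0, 100, 2000)
y = np.sin(x_axis) + 0.01 * np.random.randn(2000)  # ~16 maxima + ~16 minima
line_x, line_y = find_lines(y, x_axis=x_axis, lookahead=200)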
def dump_ss_fraction_peaks(flistfilename, trange=(2, 20), cutoff=0.2,
                           binsize=5e-3, lookahead=10):
    """Dump the peaks in the fraction of spiny stellate cells spiking over
    multiple simulations."""
    #data_dict = get_gaba_data_dict(flistfilename)
    peak_frac_med = defaultdict(list)
    peak_frac_mean = defaultdict(list)
    iqr_dict = defaultdict(list)
    with open('gaba_scale_ss_frac_cutoff_{}_binwidth_{}ms_lookahead_{}.csv'.
              format(cutoff, binsize * 1000, lookahead), 'wb') as fd:
        writer = csv.writer(fd, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(('filename', 'gabascale', 'frac_mean', 'frac_med',
                         'frac_iqr'))
        for fname in get_filenames(flistfilename):
            data = TraubData(makepath(fname))
            gaba = dict(data.fdata['/runconfig/GABA'])
            scale = gaba['conductance_scale']
            print fname, gaba
            hist, bins = data.get_spiking_cell_hist('SpinyStellate',
                                                    timerange=trange,
                                                    binsize=binsize,
                                                    frac=True)
            peaks, troughs = peakdetect(hist, bins[:-1], lookahead=lookahead)
            if len(peaks) == 0:
                print 'No peaks for', data.fdata.filename
                writer.writerow((fname, scale, '', '', ''))
                continue
            x, y = zip(*peaks)
            x = np.asarray(x)
            y = np.asarray(y)
            idx = np.flatnonzero(y > cutoff)
            frac_med = ''
            frac_mean = ''
            iqr = ''
            if len(idx) > 0:
                frac_med = np.median(y[idx])
                frac_mean = np.mean(y[idx])
                iqr = np.diff(np.percentile(y[idx], [25, 75]))
                if len(iqr) > 0:
                    iqr = iqr[0]
                else:
                    iqr = ''
            peak_frac_med[scale].append(frac_med)
            peak_frac_mean[scale].append(frac_mean)
            iqr_dict[scale].append(iqr)
            writer.writerow((fname, scale, frac_mean, frac_med, iqr))
    return peak_frac_mean, peak_frac_med, iqr_dict
def sidebyPeak(sig, look_a=4, delta=500):
    """Out: Integer (side)
        -1: minus side
         0: neutral (cannot specify)
         1: plus side
         3: rubbing

    Return the dominant side of the signal based on the number of peaks that
    occurred and the biggest peak's absolute value. If there are more than 4
    peaks, return Rubbing.
    """
    res = peakdetect.peakdetect(y_axis=sig, lookahead=look_a, delta=delta)
    num_plus = len(res[0])
    num_minus = len(res[1])
    if num_plus + num_minus > 4:
        return 3
    if num_plus == 0 and num_minus == 0:
        return 0
    elif num_plus > 0 and num_minus == 0:
        return 1
    elif num_plus == 0 and num_minus > 0:
        return -1
    else:
        max_index = np.argmax(np.array(res[0])[:, 1])
        min_index = np.argmin(np.array(res[1])[:, 1])
        max_val = res[0][max_index][1]
        min_val = res[1][min_index][1]
        if min_val > 0:
            return 1
        elif max_val < 0:
            return -1
        d_max = max_val - np.abs(min_val)
        if np.abs(d_max) < delta:
            max_x = res[0][max_index][0]
            min_x = res[1][min_index][0]
            if np.abs(max_x - min_x) < 25:
                if max_x < min_x:
                    return 1
                else:
                    return -1
            return 0
        elif d_max > 0:
            return 1
        elif d_max < 0:
            return -1
        else:
            print("Plus/minus max are exactly the same value")
            return 0
def peak_detect(self, channels):
    """Uses peak detection to find peaks above a set delta; this may require
    configuration based on testing. This gives the start of the tag in each
    channel.

    Args:
        channels (tuple): tuple of the three channels gained from isolating
            the sample

    Returns:
        tuple: the three peak detections as a tuple
    """
    peak_detect_one = peakdetect.peakdetect(channels[0], x_axis=None, delta=40000)
    peak_detect_two = peakdetect.peakdetect(channels[1], x_axis=None, delta=40000)
    peak_detect_three = peakdetect.peakdetect(channels[2], x_axis=None, delta=40000)
    return peak_detect_one, peak_detect_two, peak_detect_three
def getPeaks(self, loadLabels):
    peaksValuesVariable = None
    Taverage = 0
    nTimeStep = 0
    if self.typeLoss == "MSEPeak":
        indexPeaksVariable, peaksValuesVariable = self.formatPeaksList(
            peakdetect(loadLabels, lookahead=1))
    elif self.typeLoss == "MSECyclic":
        Taverage, nTimeStep = self.getMeanT(loadLabels)
    return peaksValuesVariable, Taverage, nTimeStep
def peakToTroughs(dailyret, dates):
    """Example:
        sr = s['retdat']
        stkd = s['stockData']
        dt = stkd['Date']
        ptk = peakToTroughs(sr, dt)
    """
    # Get cumulative percent changes.
    drs = Series(dailyret)
    soc1dr = drs + 1
    soc1cumdr = soc1dr.cumprod()
    localPeaksPairs = peakdetect(y_axis=soc1cumdr, lookahead=1)[0]
    indexOfLocalPeaks = np.empty(len(localPeaksPairs))
    for i in range(len(indexOfLocalPeaks)):
        indexOfLocalPeaks[i] = localPeaksPairs[i][0]
    # Data frame with 2 columns, where column 1 is a peak and column 2 is the
    # next peak that follows it.
    dd = DataFrame({'a': indexOfLocalPeaks[0:(len(indexOfLocalPeaks) - 1)],
                    'b': indexOfLocalPeaks[1:len(indexOfLocalPeaks)]})
    # Add one more row to dd for the last peak and the last row of soc1cumdr,
    # so that the last possible trough is calculated, if there was one between
    # the last peak and the last day of data.
    lastDdValue = dd.iloc[len(dd) - 1, 1]
    lastValueInData = len(soc1cumdr) - 1
    dd = rbind(dd, [lastDdValue, lastValueInData])

    def minBetween2Peaks(x):
        lowindex = int(x[0])
        highindex = int(x[1])
        minval = min(soc1cumdr[lowindex:(highindex + 1)])
        return minval

    localMins = dd.apply(minBetween2Peaks, 1)
    localMins.index = range(len(localMins))
    localPeaks = soc1cumdr[indexOfLocalPeaks.astype(int)]
    localPeaks.index = range(len(localPeaks))
    diffs = (localMins - localPeaks) / localPeaks

    # Get the indices of localMins in soc1cumdr so that their dates can be
    # looked up.
    def ff(x):
        """Get the index of soc1cumdr whose value equals x."""
        r = soc1cumdr[soc1cumdr == x].index[0]
        return r

    indexOfLocalMins = map(ff, localMins)
    datesOfLocalMins = Series(dates)[indexOfLocalMins]
    datesOfLocalMins.index = range(len(datesOfLocalMins))

    # Calculate peak to end of data.
    def minBetweenPeakAndEnd(x):
        arr = soc1cumdr.iloc[x[0]:len(soc1cumdr)]
        return min(arr)

    absMinsToEnd = dd.apply(minBetweenPeakAndEnd, 1)
    absMinsToEnd.index = range(len(absMinsToEnd))
    diffsToEnd = (absMinsToEnd - localPeaks) / localPeaks
    ret = DataFrame({'Date': datesOfLocalMins, 'Peak': localPeaks,
                     'Valley': localMins, 'Diff': diffs,
                     'DiffToEnd': diffsToEnd})
    return ret
def getPeaks(signal, neighbors_annotations):
    num_boundaries_all = [len(x) - 1 for x in neighbors_annotations]
    median_num_boundaries = int(np.median(num_boundaries_all))
    peaks = list(peakdetect.peakdetect(signal, lookahead=500, delta=0.2))
    locs = [elem[0] for elem in peaks[0]]  # keep only the indices
    maxs = [elem[1] for elem in peaks[0]]  # keep only the values
    both_zipped = zip(maxs, locs)          # zip them together to sort both
    both_sorted = sorted(both_zipped)
    sort_locs = [elem[1] for elem in both_sorted]
    if len(both_sorted) > median_num_boundaries:
        sort_locs = sort_locs[-median_num_boundaries:]
    return sort_locs
def peaks_find(dat, dx, lookahead=200, delta=0, type='max'):
    peaks_max, peaks_min = peakdetect(dat, None, lookahead, delta)
    xpd = []
    ybeat = []
    if type == 'max':
        for datapoint in peaks_max:
            xpd.append(datapoint[0] * dx)
            ybeat.append(datapoint[1])
    if type == 'min':
        for datapoint in peaks_min:
            xpd.append(datapoint[0] * dx)
            ybeat.append(datapoint[1])
    return xpd, ybeat
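# --- Hedged usage sketch for peaks_find (not from the original source).
# Assumes dx is the sample spacing in seconds; a 1 Hz tone sampled at 250 Hz
# should place the detected maxima roughly 1.0 s apart.
import numpy as np

dx = 1.0 / 250
sig = np.sin(2 * np.pi * np.arange(0, 5, dx))
xpd, ybeat = peaks_find(sig, dx, lookahead=50, delta=0.1, type='max')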
def generate_plot(key, my_seq):
    analysed_seq = ProteinAnalysis(my_seq)
    l = len(my_seq)
    window_size = 21
    scale = analysed_seq.protein_scale(param_dict=amino_acids,
                                       window=window_size, edge=0.75)
    x = range((window_size + 1) // 2, len(scale) + (window_size + 1) // 2)
    lookahead = 7
    minp, maxp = peakdetect(scale, lookahead=(lookahead + 1) // 2)
    start = min(x) - 1
    xpeaks = [xp[0] + (window_size + 1) // 2 for xp in minp]
    ypeaks = [scale[xpi - (window_size + 1) // 2] for xpi in xpeaks]
    t_x = np.array(scale)
    added_min = np.where(t_x < 0.9)[0]
    print(added_min)
    xdpeaks = [xdp[0] + (window_size + 1) // 2 for xdp in maxp]
    ydpeaks = [scale[xdpi - (window_size + 1) // 2] for xdpi in xdpeaks]
    num_pos = np.where(np.array(ydpeaks) < 0.9)[0].size
    print(num_pos)
    if num_pos == 0 and len(added_min) != 0:
        added_val = [scale[i] for i in list(added_min)]
        minimum = added_val.index(min(added_val)) - start + 2
        print(added_min[minimum])
        print(added_val[minimum])
        xdpeaks.append(added_min[minimum])
        ydpeaks.append(added_val[minimum])
    print("maxs:", np.array(xpeaks) + start)
    print("mins:", np.array(xdpeaks) + start)
    #print(scale)
    plt.clf()
    plt.plot(x, scale, 'b', xpeaks, ypeaks, 'ro', xdpeaks, ydpeaks, 'go')
    plt.grid(True)
    #plt.axis([0, max(x), min(scale) - 0.05 * min(scale), max(scale) + 0.05 * max(scale)])
    #plt.axis([0, max(x), 0.85, max(scale) + 0.05 * max(scale)])
    plt.legend(['Scores for ' + key])  #, 'local maxima', 'local minima'])
    plt.xlabel('Position')
    plt.ylabel('Score')
    plt.savefig('figs/' + key + '.png')
def peak_detect(self, channels):
    """Uses peak detection to find peaks above a set delta; this may require
    configuration based on testing. This gives the start of the tag in each
    channel.

    Args:
        channels (tuple): tuple of the three channels gained from isolating
            the sample

    Returns:
        tuple: the three peak detections as a tuple
    """
    peak_detect_one = peakdetect.peakdetect(channels[0], x_axis=None,
                                            lookahead=1, delta=40000)  # TODO
    peak_detect_two = peakdetect.peakdetect(channels[1], x_axis=None, delta=40000)
    peak_detect_three = peakdetect.peakdetect(channels[2], x_axis=None, delta=40000)
    self.get_logger().info('PEAKONE = {0}'.format(peak_detect_one[0]))
    return peak_detect_one, peak_detect_two, peak_detect_three
def main(root, SaveImage):
    trainPath, testPath, validPath = getDataPaths(root)
    dataTrain, labelTrain = openHDF5(trainPath)
    dataTest, labelTest = openHDF5(testPath)
    dataValid, labelValid = openHDF5(validPath)
    numIndex = getRandomInt(labelTest.shape[0] - 1)
    data = cleanVect(labelTest[numIndex])
    getRandaIMT = data[0]
    getRandDiam = data[1]
    pathSave = os.path.join(SaveImage, "img.jpg")
    x, y = formatList(peakdetect(getRandDiam, lookahead=1))
    getPlotPlotIMTDiam(getRandDiam, "Diam", x, y, pathSave)
def plot_cne(name, scores, valid):
    lookahead = len(scores) / 50
    scores = np.array([float(x) for x in scores])
    signal = smooth(scores, window_len=40, window='bartlett')
    maxima = peakdetect(signal, lookahead=lookahead)[0]
    m_x = np.array([m[0] for m in maxima])
    m_y = np.array([m[1] for m in maxima])
    plt.plot(range(len(signal)), signal, 'k', m_x, m_y, 'bo')
    plt.axvspan(valid[0], valid[1], facecolor='r', alpha=0.4)
    plt.xlabel('alignment')
    plt.ylabel('score')
    plt.title(name)
    fig = plt.gcf()
    fig.set_size_inches(8, 5)
    plt.savefig(name + '.png', dpi=140)
def run():
    input_np = np.random.rand(1, 150)[0]
    target_np = np.random.rand(1, 150)[0]
    print("\n ==> get Variable:")
    input_th = Variable(torch.from_numpy(input_np), requires_grad=True)
    target_th = Variable(torch.from_numpy(target_np))  # comes out as a torch.DoubleTensor
    print("\n ==> get Peaks:")
    x, peaks = formatList(peakdetect(target_np, lookahead=1))
    Loss = SmoothPeakLoss(0.5, peaks)
    error = Loss(input_th, target_th)
    error.backward()
    print("\n ==> Error: \n")
    print(error.data[0])
def getRubingInfo(GYRO_Z, look_a=4, delta=1500):
    """Out: Integer (total number of peaks), Float (average distance between
    plus peaks).

    Return the total number of peak occurrences and the average time between
    peaks. Uses the GYRO_Z signal, which is more stable than the EOG signal
    and precise enough to detect nose-rubbing input.
    """
    res = peakdetect.peakdetect(y_axis=GYRO_Z, lookahead=look_a, delta=delta)
    num_plus = len(res[0])
    num_minus = len(res[1])
    plus_xs = np.array(res[0])[:, 0]
    mean_dx = np.average(np.diff(plus_xs))
    return num_plus + num_minus, mean_dx
def addPeaks(self, line):
    """Need to create the points.

    Consider: https://pythonhosted.org/guiqwt/examples.html
    """
    x = line.xydata[0]
    y = line.xydata[1]
    _max, _min = pd.peakdetect(y, x, 5)
    xm = [p[0] for p in _max]
    ym = [p[1] for p in _max]
    xn = [p[0] for p in _min]
    yn = [p[1] for p in _min]
    curPeak = np.vstack((xm, ym))
    self.peaks.append(curPeak)
    self.show_peaks(curPeak)
def update_figure(self, output):
    print "FFT update called"
    fftX = fftpack.fftfreq(len(output), d=0.1)
    fftY = fftpack.rfft(output)
    _max, _min = peakdetect(fftY, fftX, 500, 0.30)
    xm = [p[0] for p in _max]
    ym = [abs(p[1]) for p in _max]
    xn = [p[0] for p in _min]
    yn = [abs(p[1]) for p in _min]
    self.axes.hold(True)
    self.axes.bar(xm, ym, 0.02, color='r')
    self.axes.bar(xn, yn, 0.02, color='y')
    #self.axes.plot(xn, yn, 'r')
    self.draw()
def inputs(filename):
    """Takes in the experimental values measured from the experiment that is
    simulated to find n2."""
    mat = loadmat('../loading_data/LP11_FWM_data.mat')
    #mat = loadmat(filename)
    lams_vec_exp = mat['lam_vals']
    D = mat['D']
    del mat
    lamp = np.zeros(len(D[0, :]))
    lams = np.zeros(len(D[0, :]))
    lami = np.zeros(len(D[0, :]))
    D_p = np.zeros(len(D[0, :]))
    D_s = np.zeros(len(D[0, :]))
    D_i = np.zeros(len(D[0, :]))
    for i in range(len(D[0, :])):
        _max, _min = peakdetect(D[:, i], lams_vec_exp[:, i], 50)
        max_ = np.asanyarray(_max)
        max_ = max_[np.argsort(max_[:, 1])]
        lamp[i], D_p[i] = max_[-1::, 0][0], max_[-1::, 1][0]
        lami[i], D_i[i] = max_[-3::3, 0][0], max_[-3::3, 1][0]
        lams[i], D_s[i] = max_[-2::2, 0][0], max_[-2::2, 1][0]
    D_p = D_p[0:-3:]
    D_s = D_s[0:-3:]
    D_i = D_i[0:-3:]
    lamp = lamp[0:-3:]
    lams = lams[0:-3:]
    lami = lami[0:-3:]
    P_vec = np.arange(22.7, 23.7, 2)
    P_vec += 10
    P_vec = dbm_to_w(P_vec)
    P_signal_vec = dbm_to_w(D_s) - dbm_to_w(D_i)
    lamp = np.copy(lamp) * 1e-9
    lams = np.copy(lams) * 1e-9
    lami = np.copy(lami) * 1e-9
    AB_measured = np.zeros([3, len(P_vec), len(lams)])
    AB_measured[0, 0, :] = D_p
    AB_measured[1, 0, :] = D_s
    AB_measured[2, 0, :] = D_i
    return AB_measured, P_signal_vec, P_vec, lamp, lams, lami
def ranked_peaks(cne_dict, extra):
    def dist(cne, i):
        v_avg = (extra[cne]['dr_valid_co']['start'] +
                 extra[cne]['dr_valid_co']['end']) / 2.0
        v_i = v_avg - extra[cne]['dr_co']['start']
        return abs(i - v_i)

    results = defaultdict(dict)
    for cne, scores in cne_dict.iteritems():
        lookahead = int(len(scores) / 50) + 1
        scores = np.array([float(x) for x in scores])
        signal = smooth(scores, window_len=40, window='bartlett')
        maxima = peakdetect(signal, lookahead=lookahead)[0]
        maxima.sort(key=itemgetter(1), reverse=True)
        if not maxima:
            print cne
            continue
        hit_rank = np.argmin(np.array([dist(cne, i) for i, score in maxima]))
        results[cne]['rank'] = hit_rank + 1
        results[cne]['places'] = len(maxima)
    return results
def dump_syncspike_stats(outfile, dbcnt_file_dict, trange=(2, 20), cutoff=0.2,
                         binsize=5e-3, lookahead=3):
    """Combined statistics for synchronous fractions and inter-burst
    intervals."""
    datalist = []
    for dbcnt, flist in dbcnt_file_dict.items():
        for fname in flist:
            data = TraubData(makepath(fname))
            hist, bins = data.get_spiking_cell_hist('SpinyStellate',
                                                    timerange=trange,
                                                    binsize=binsize,
                                                    frac=True)
            peaks, troughs = peakdetect(hist, bins[:-1], lookahead=lookahead)
            time, frac = zip(*peaks)
            frac = np.array(frac)
            time = np.array(time)
            idx = np.flatnonzero(frac > cutoff)
            frac = frac[idx].copy()
            ibi = np.diff(time[idx])
            data_stats = {
                'filename': fname,
                'dbcount': dbcnt,
                'frac_mean': np.mean(frac),
                'frac_median': np.median(frac),
                'frac_iqr': np.diff(np.percentile(frac, [25, 75]))[0],
                'frac_sem': stats.sem(frac),
                'ibi_mean': np.mean(ibi),
                'ibi_median': np.median(ibi),
                'ibi_iqr': np.diff(np.percentile(ibi, [25, 75]))[0],
                'ibi_sem': stats.sem(ibi)
            }
            datalist.append(data_stats)
    dataframe = pd.DataFrame(datalist,
                             columns=['filename', 'dbcount', 'frac_mean',
                                      'frac_median', 'frac_iqr', 'frac_sem',
                                      'ibi_mean', 'ibi_median', 'ibi_iqr',
                                      'ibi_sem'])
    dataframe.to_csv(outfile)
    print 'Saved data in', outfile
def input_powers_wavelengths():
    """From the experimental data."""
    mat = loadmat('../loading_data/LP11_FWM_data.mat')
    lams_vec_exp = mat['lam_vals']
    D = mat['D']
    del mat
    lamp = np.zeros(len(D[0, :]))
    lams = np.zeros(len(D[0, :]))
    lami = np.zeros(len(D[0, :]))
    D_p = np.zeros(len(D[0, :]))
    D_s = np.zeros(len(D[0, :]))
    D_i = np.zeros(len(D[0, :]))
    for i in range(len(D[0, :])):
        _max, _min = peakdetect(D[:, i], lams_vec_exp[:, i], 50)
        max_ = np.asanyarray(_max)
        max_ = max_[np.argsort(max_[:, 1])]
        lamp[i], D_p[i] = max_[-1::, 0][0], max_[-1::, 1][0]
        lami[i], D_i[i] = max_[-3::3, 0][0], max_[-3::3, 1][0]
        lams[i], D_s[i] = max_[-2::2, 0][0], max_[-2::2, 1][0]
    D_p = D_p[0:-3:]
    D_s = D_s[0:-3:]
    D_i = D_i[0:-3:]
    lamp = lamp[0:-3:]
    lams = lams[0:-3:]
    lami = lami[0:-3:]
    P_vec = np.arange(22.7, 23.7, 2)
    P_vec += 10
    P_vec = dbm_to_w(P_vec)
    P_signal_vec = dbm_to_w(D_s) - dbm_to_w(D_i)
    lamp = np.copy(lamp) * 1e-9
    lams = np.copy(lams) * 1e-9
    lami = np.copy(lami) * 1e-9
    return P_vec, P_signal_vec, lamp, lams, lami
def fit_remove_molecule(absorbance, peakwidth, mlcl, wavenumber, look, Delta):
    mlcl_peaks, mlcl_valleys = pdet.peakdetect(mlcl, lookahead=look, delta=Delta)
    peak_index, peak_height = zip(*mlcl_peaks)

    def gauss_func(peak_x, a, x0, sigma):
        return a * np.exp(-(peak_x - x0)**2 / (2 * sigma**2))

    new_absorbance = absorbance
    noise = 2.5 * np.mean(np.abs(absorbance))  # (arbitrary) estimate of the ground noise
    for ind in peak_index:
        peak_range = range(ind - peakwidth, ind + peakwidth)
        peak_x = wavenumber[peak_range]
        peak_y = absorbance[peak_range]
        if max(peak_y) >= noise:  # 0.04: pick a value above the ground noise of the absorbance
            len_x = len(peak_x)
            mean = wavenumber[ind]
            sigma = np.sqrt(np.dot(peak_x - mean, peak_x - mean) / len_x)
            magnitude = np.mean(peak_y)
            popt, pcov = curve_fit(gauss_func, peak_x, peak_y,
                                   p0=[magnitude, mean, sigma])
            y_fit = gauss_func(peak_x, *popt)
            new_absorbance[ind - peakwidth:ind + peakwidth] = \
                absorbance[ind - peakwidth:ind + peakwidth] - y_fit
    return new_absorbance

#if __name__ == '__main__':
# Test for load_database_compound
#wavenumber = np.loadtxt('D:\Workspace\Breath Analysis\Measurements\Wavenumber.txt').astype(float)
#compound_path = 'D:\Workspace\Breath Analysis\Compounds\Acetone\ACETONE_25T.TXT'
##bla = profile.run('load_database_compound(wavenumber,compound_path)')
#bla = load_database_compound(wavenumber,compound_path)
# Test lsqnonlin
#bla2 = lsqnonlin()
# Test load_measurement
#group = 0
#sample_max = 2
#measurement_folder = "D:\\Workspace\\Breath Analysis\\Measurements\\data of 26-8-2014\\data no zeros\\"
#bla = load_measurement(measurement_folder, group, sample_max)
def test():
    xdata, ydata = load_sample_data()
    #from scipy.signal import find_peaks_cwt
    #peak_ind = find_peaks_cwt(ydata, np.arange(1, 5))
    #from scipy.signal import argrelextrema
    #peak_ind = argrelextrema(ydata, np.greater)
    lib_path = r'C:\Users\YinshengGuo\WinPython-64bit-2.7.9.1\yinshengguo_codes\peak_detection'
    sys.path.append(lib_path)
    from peakdetect import peakdetect
    max_pt, min_pt = peakdetect(ydata, lookahead=2, delta=100)
    num_peaks = len(max_pt)
    ind_max = [item[0] for item in max_pt]
    p0 = np.zeros(num_peaks * 4 + 1)
    p0[0] = ydata.min()
    for i in range(num_peaks):
        p0[4 * i + 1] = max_pt[i][0]  # peak position
        p0[4 * i + 2] = 2             # peak width. FIX: magic number
        p0[4 * i + 3] = max_pt[i][1]  # peak amplitude
        p0[4 * i + 4] = 0             # peak offset. FIX: magic number
    ydata_calc = calc_ydata(xdata, p0)
    # Check options with scipy.optimize.show_options('minimize', 'Nelder-Mead').
    min_res = scipy.optimize.minimize(errfunc_multiLor_min, p0,
                                      args=(xdata, ydata),
                                      method='Nelder-Mead',
                                      options={'xtol': 1e-9, 'maxfev': 1e5})
    p_min = min_res.x
    ydata_fit = calc_ydata(xdata, p_min)
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(xdata, ydata, 'b-')
    ax.plot(xdata[ind_max], ydata[ind_max], 'ro')
    ax.plot(xdata, ydata_calc, 'g.')
    ax.plot(xdata, ydata_fit, 'k-')
    plt.show()
import peakdetect as pd
from numpy import genfromtxt
import matplotlib.pyplot as plt

data = genfromtxt('data/moving-average-results.csv', delimiter=',')
avgdata = [data[i][0] for i in range(len(data))]
labeldata = [data[i][4] * .75 for i in range(len(data))]
indices = [i for i in range(len(data))]
# print avgdata
# print labeldata
plt.plot(indices, labeldata, 'g')
plt.plot(indices, avgdata, 'r')
peaks = pd.peakdetect(avgdata, lookahead=25)
peakvals = [peaks[i] for i in range(len(peaks))]
peakvalsx = [val[0] for val in peakvals[0]]
peakvalsy = [val[1] for val in peakvals[0]]
# print peakvals
plt.scatter(peakvalsx, peakvalsy)
plt.show()
rt_column_name = best_sample + "_rt"
int_column_name = best_sample + "_int"
rt_list = map(float, row[rt_column_name].split(","))
rt_list2 = list(np.arange(rt_list[0], rt_list[1], rt_list[2]))
if (rt_list[1] - rt_list2[-1]) > 1:
    rt_list2.append(rt_list[1])
int_list = map(float, row[int_column_name].split(","))
peak_max = []
peak_min = []
if len(rt_list2) != len(int_list):
    print "ERROR in transition name is %s" % transition_name
    print "rt_list length %d is different from int_list length %d" % (len(rt_list2), len(int_list))
    print "rt_list is %s" % rt_list
    print "rt_list2 is %s" % rt_list2
    print "int_list is %s" % int_list
else:
    peak_max, peak_min = peakdetect.peakdetect(int_list, rt_list2, 10.0, 0.3)
    for peak_max_pair in peak_max:
        # print "type of peak_max_rt is ", type(peak_max_pair[0])
        if abs(peak_max_pair[0] - best_rt) < PEAK_TOLERANCE:
            # A peak was found for the transition in the rt range; write it
            # to the outfile.
            writer.writerow(row)
            break
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 14:28:31 2015

@author: stefan.burtscher
"""

import peakdetect

# Test the evaluations
# DETECT PEAKS
MW_oben = 250    # upper bound of the measured values
MW_unten = -250  # lower bound of the measured values
N_Klassen = 50   # number of classes (bins)
# Reset value = class width * factor, at least 2.5% of the measurement range
Rueckstellw = (MW_oben - MW_unten) / N_Klassen * 1.1
max_peaks, min_peaks = peakdetect.peakdetect(data.Sig1, x_axis=data.index,
                                             lookahead=10, delta=Rueckstellw)
#xind_peak = peakdetect.peakdetect(data.Sig1, lookahead=10, delta=Rueckstellw)
''' Returns the indices of the MAX and MIN peaks: xind_peak(0) and xind_peak(1). '''
from __future__ import (print_function, division, unicode_literals,
                        absolute_import)

import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import peakdetect

t, U = np.loadtxt('data.txt', unpack=True)
t *= 1e3
plt.plot(t, U, 'b-', label='Damped oscillation')
maxs, mins = peakdetect.peakdetect(U, t, 4, 1)
maxs = np.array(maxs).T
mins = np.array(mins).T
x = np.linspace(0, plt.xlim()[1])


def e(x, a, b, c):
    return a * np.exp(b * x) + c


popt, pcov = curve_fit(e, maxs[0], maxs[1])
print(popt, np.sqrt(np.diag(pcov)), sep='\n')
plt.plot(maxs[0], maxs[1], 'rx', label='Extrema')
plt.plot(x, e(x, *popt), 'g-', label='Upper envelope')
popt, pcov = curve_fit(e, mins[0], mins[1])
print(popt, np.sqrt(np.diag(pcov)), sep='\n')
def GetPeakPositionCatalog(numSet, numKin, rows, cols, typicalIonDiameter,
                           initialThreshold, darkThreshold, iterations):
    """This method counts the number of dark ions (and their positions) in an
    ion chain. It requires that the background image contain a fully
    illuminated chain of ions.

    numKin - total number of images in the data array.

    This method assumes that for each background image taken, there are
    ((numKin / iterations) - 1) images for comparison.

    Example: numKin = 50, iterations = 10
        For each background image, the following 4 images will be analyzed
        against it: (1 + 4) * 10 = 50; 1 background, 4 images.
        Therefore: 10 'sets' of 5 images.

    NOTE: Assumes collectData was just run!
    """
    numSet += 6
    numberImagesInSet = (numKin / iterations)
    numberImagesToAnalyze = (numKin / iterations) - 1
    # 3D array of each image
    # try:
    #     data = np.reshape(np.array(self.camera.imageArray), (numKin, rows, cols))
    # except ValueError:
    #     raise Exception("Trying to analyze more images than there is in the data? Image region correct?")
    # data = self.imageArray

    ###### TESTING TESTING TESTING 123 ################
    # rawdata1 = np.loadtxt(r'C:\Users\lattice\Documents\Andor\jun12\062812\7\image19')
    # rawdata2 = np.loadtxt(r'C:\Users\lattice\Documents\Andor\jun12\062812\7\image20')
    # rawdata3 = np.loadtxt(r'C:\Users\lattice\Documents\Andor\jun12\062812\7\image21')
    # ion swap
    arr = [[] for i in range(3)]
    for j in range(3):
        arr[j] = np.loadtxt(r'C:\Users\lattice\Documents\Andor\jun12\062812\7\image-1-' + str(3 * numSet + j + 1))
    # arr[3] = rawdata1
    # arr[4] = rawdata1
    # arr[5] = rawdata3
    data = np.array(arr)
    ###### TESTING TESTING TESTING 123 ################

    peakPositionCatalog = [[] for i in range(iterations)]
    """peakPositionCatalog is a list of lists of peak positions.

    Ex: peakPositionCatalog[1st iteration] = [[peak positions background],
                                              [peak positions analyzed image],
                                              [peak positions analyzed image]]

    len(peakPositionCatalog[0][1]) = number of dark ions for the first image
    to be analyzed.
    """
    for imageSet in np.arange(iterations):
        sumArray = []
        for image in np.arange(numberImagesInSet):
            """The image, assumed to have more columns than rows, will first
            be analyzed in the axial direction: the sums of the intensities
            are collected in the axial direction first. This helps narrow
            down which section of the image to use, in order to exclude
            noise.

            Example:
                               |  [[ # # # # # # # # # # # # # # # # # # # # # #],
                            0  |   [ # # # # # # # # # # # # # # # # # # # # # #],
                               |   [ # # # # # # # # # # # # # # # # # # # # # #],
              avgIonDiameter
                               |   [ * * * * * * * * * * * * * * * * * * * * * *],  <-- strip of highest intensity,
                            1  |   [ * * * * * * * * * * * * * * * * * * * * * *],  <-- will only be used for
                               |   [ * * * * * * * * * * * * * * * * * * * * * *],  <-- proceeding calculations
                               |   [ % % % % % % % % % % % % % % % % % % % % % %],
                            2  |   [ % % % % % % % % % % % % % % % % % % % % % %],
                               |   [ % % % % % % % % % % % % % % % % % % % % % %]]
                                                      Axial
            """
            axialSumSegments = []
            # 1D vector: sum of intensities in the axial direction.
            axialData = np.sum(data[numberImagesInSet * imageSet + image], 1)
            length = rows
            """Choose each strip by only analyzing the 1D vector of sums:

                [ # # # * * * % % %]  ->  [# * %]
                        0   1   2          ^
                            ^              |
                            |           Segment
                    most intense sum
            """
            intensitySum = 0
            cnt = 0
            for i in np.arange(rows):
                intensitySum += axialData[i]
                cnt += 1
                if cnt == typicalIonDiameter:
                    axialSumSegments.append(intensitySum)
                    cnt = 0
                    intensitySum = 0
            # Find the index of the strip with the highest intensity.
            mostIntenseRegionIndex = np.where(axialSumSegments == np.max(axialSumSegments))[0][0]
            """Use this strip to create the 1-dimensional array of intensity
            sums in the radial direction:

                [ * * * * * * * * * * * * * * * * * * * * * *]   1D vector of sums in the radial direction
                                      ^                           length = cols
                                      |
                               used to find peaks

               [[ * * * * * * * * * * * * * * * * * * * * * *],   most
                [ * * * * * * * * * * * * * * * * * * * * * *],   intense
                [ * * * * * * * * * * * * * * * * * * * * * *]]   strip
            """
            mostIntenseData = data[numberImagesInSet * imageSet + image][(mostIntenseRegionIndex * typicalIonDiameter):(mostIntenseRegionIndex * typicalIonDiameter + typicalIonDiameter), :]
            mostIntenseDataSums = np.sum(mostIntenseData, 0) / typicalIonDiameter  # 1D vector
            """
                      |    |    |    |    |
                 _____/|\__/|\__/|\__/|\__/|\_____   background image sum

                 _________________   _____________   dark ion image sum
                                  \|/                 (background subtracted out)
            """
            sumArray.append(mostIntenseDataSums)
        ########### find the number of ions, peak positions of initial image ###########
        initialDenoised = ndimage.gaussian_filter(sumArray[0], 2)
        initialMaxPeaks, initialMinPeaks = peakdetect.peakdetect(initialDenoised, range(cols), 1, 1)
        initialPeakPositions = []
        for peak in initialMaxPeaks:  # peak = [position (pixel), intensity]
            if peak[1] > initialThreshold:
                initialPeakPositions.append(peak[0])
        # print 'initial peak positions: ', initialPeakPositions
        # print 'number of ions: ', len(initialPeakPositions)
        pyplot.figure()
        xaxis = range(cols)
        pyplot.plot(xaxis, initialDenoised, label=('initial' + ' ' + str(numSet + 1)))
        peakPositionCatalog[imageSet].append(initialPeakPositions)
        ########### find the number of dark ions, peak positions of analyzed images ###########
        for image in np.arange(numberImagesToAnalyze):
            subtractedData = sumArray[(image + 1)] - sumArray[0]
            subtractedDataDenoised = ndimage.gaussian_filter(subtractedData, 2)
            darkMaxPeaks, darkMinPeaks = peakdetect.peakdetect(subtractedDataDenoised, range(cols), 1, 1)
            darkPeakPositions = []
            for peak in darkMinPeaks:
                if peak[1] < darkThreshold:
                    darkPeakPositions.append(peak[0])
            # print 'initial dark peak positions: ', darkPeakPositions
            # print 'number of dark ions: ', len(darkPeakPositions)
            # We're hoping there is only one peak here!
            peakPositionCatalog[imageSet].append(darkPeakPositions)
            pyplot.plot(xaxis, subtractedDataDenoised, label=('dark' + str(image) + ' ' + str(numSet + 1)))
    pyplot.legend(loc='best')
pyplot.plot(range(rows), data, label=str(j + 1))
pyplot.xlabel('Axial Direction (pixels)')
pyplot.ylabel('Average Intensity (counts/sec)')
# Find the number of ions: start with the highest peaks and apply gaussians?
gauss_denoised = ndimage.gaussian_filter(data, 2)
pyplot.figure(2)
#gauss_denoised = gauss_denoised[0:rows]
pyplot.plot(range(rows), gauss_denoised, label=(str(s) + '-' + str(j + 1)))
pyplot.legend(loc='best')
pyplot.xlabel('Axial Direction (pixels)')
pyplot.ylabel('Average Intensity (counts/sec)')
maxPeaks, minPeaks = peakdetect.peakdetect(gauss_denoised, range(rows), 1, 1)
# # Find the 1st moment of a gaussian around each peak:
# moments = []
# for peak in maxPeaks:
#     print 'peak: ', peak[0]
#     minRange = peak[0] - 2 * typicalIonDiameter
#     maxRange = peak[0] + 2 * typicalIonDiameter
#     print 'range: ', minRange, maxRange
#     peakData = gauss_denoised[minRange:maxRange]
#     X = np.arange(minRange, maxRange)
#     peakMean = np.sum(X * peakData) / np.sum(peakData)
#     peakWidth = np.sqrt(abs(np.sum((X - peakMean)**2 * peakData) / np.sum(peakData)))
#     moments.append(peakWidth)
# print moments
print "Image " + str(j) + " - Region picked with highest intensity: ", maxIndex # use this strip to create the 1-dimensional array of intensity sums procdata = rawdata[ :, (maxIndex * typicalIonDiameter) : (maxIndex * typicalIonDiameter + typicalIonDiameter) ] data = np.array(rows) data = np.sum(procdata, 1) / typicalIonDiameter dataArray.append(data) ########### find the number of ions, peak positions of initial image ########### initialData_denoised = ndimage.gaussian_filter(dataArray[0], 2) # initialData_denoised = initialData_denoised[0:rows] # ? initialMaxPeaks, initialMinPeaks = peakdetect.peakdetect(initialData_denoised, range(rows), 1, 1) initialPeakPositions = [] for q in initialMaxPeaks: if q[1] > minimumIonIntensity: initialPeakPositions.append(q[0]) print "initial peak positions: ", initialPeakPositions if len(initialPeakPositions) != expectedNumberOfIons: ########### find the number of ions, peak positions of initial dark image ########### t3 = time.clock() initialDarkImageData = dataArray[1] - dataArray[0] uncorrectedInitialDarkImageData_denoised = ndimage.gaussian_filter(dataArray[1], 2) initialDarkImageData_denoised = ndimage.gaussian_filter(initialDarkImageData, 2) initialDarkMaxPeaks, initialDarkMinPeaks = peakdetect.peakdetect(
def calibrate(s):
    COL_X = 1
    COL_Y = 2
    COL_Z = 3
    COL_M = 4
    from peakdetect import peakdetect
    try:
        x_peaks_valleys = peakdetect(s[:, COL_X], lookahead=500)
        y_peaks_valleys = peakdetect(s[:, COL_Y], lookahead=500)
        z_peaks_valleys = peakdetect(s[:, COL_Z], lookahead=500)

        if len(x_peaks_valleys[1]) > 0:
            x_min = np.min(np.array(x_peaks_valleys[1])[:, 1])
        else:
            if DEBUG: print("x not enough valleys")
            x_min = np.min(s[:, COL_X])
        if len(x_peaks_valleys[0]) > 0:
            x_max = np.max(np.array(x_peaks_valleys[0])[:, 1])
        else:
            if DEBUG: print("x not enough peaks")
            x_max = np.max(s[:, COL_X])

        if len(y_peaks_valleys[1]) > 0:
            y_min = np.min(np.array(y_peaks_valleys[1])[:, 1])
        else:
            if DEBUG: print("y not enough valleys")
            y_min = np.min(s[:, COL_Y])
        if len(y_peaks_valleys[0]) > 0:
            y_max = np.max(np.array(y_peaks_valleys[0])[:, 1])
        else:
            if DEBUG: print("y not enough peaks")
            y_max = np.max(s[:, COL_Y])

        if len(z_peaks_valleys[1]) > 0:
            z_min = np.min(np.array(z_peaks_valleys[1])[:, 1])
        else:
            if DEBUG: print("z not enough valleys")
            z_min = np.min(s[:, COL_Z])
        if len(z_peaks_valleys[0]) > 0:
            z_max = np.max(np.array(z_peaks_valleys[0])[:, 1])
        else:
            if DEBUG: print("z not enough peaks")
            z_max = np.max(s[:, COL_Z])
    except:
        pass

    # Centre each axis on the midpoint of its min/max envelope.
    offset_x = x_min + (x_max - x_min) / 2
    offset_y = y_min + (y_max - y_min) / 2
    offset_z = z_min + (z_max - z_min) / 2
    if DEBUG: print(x_min, x_max, y_min, y_max, z_min, z_max)
    s[:, COL_X] = s[:, COL_X] - offset_x
    s[:, COL_Y] = s[:, COL_Y] - offset_y
    s[:, COL_Z] = s[:, COL_Z] - offset_z
    s[:, COL_M] = np.sqrt(s[:, COL_X]**2 + s[:, COL_Y]**2 + s[:, COL_Z]**2)
    return s
def peak_detect(self):
    peakdetect._datacheck_peakdetect(self.time_points, self.data_points)
    peaks = peakdetect.peakdetect(self.data_points, self.time_points)
    return peaks